git.sesse.net Git - nageru/commitdiff
Fix a Clang 19 warning. master
author: Steinar H. Gunderson <sgunderson@bigfoot.com>
Mon, 13 May 2024 22:01:14 +0000 (00:01 +0200)
committer: Steinar H. Gunderson <sgunderson@bigfoot.com>
Mon, 13 May 2024 22:01:14 +0000 (00:01 +0200)
Clang rightfully pointed out that VLAs are a non-standard extension.
We can do just fine with three small heap allocations per software-decoded
JPEG (the main path is VA-API anyway).

26 files changed:
futatabi/jpeg_frame_view.cpp
nageru/analyzer.cpp
nageru/av1_encoder.cpp
nageru/benchmark_audio_mixer.cpp
nageru/context.h [deleted file]
nageru/correlation_measurer.cpp
nageru/decklink_capture.cpp
nageru/defs.h
nageru/ffmpeg_capture.cpp
nageru/ffmpeg_capture.h
nageru/filter.cpp
nageru/input_mapping_dialog.cpp
nageru/kaeru.cpp
nageru/midi_mapper.cpp
nageru/midi_mapping_dialog.cpp
nageru/mixer.cpp
nageru/mixer.h
nageru/nageru_cef_app.cpp
nageru/pbo_frame_allocator.cpp
nageru/pbo_frame_allocator.h
nageru/resampling_queue.cpp
nageru/theme.cpp
nageru/theme.h
nageru/timecode_renderer.cpp
nageru/v210_converter.cpp
nageru/x264_dynamic.cpp

index 6ab19482fb93772956036c19175b6ecef36262e3..6dafb3e9ac7eb8adc5723a2cd61ab50a7b03de8c 100644 (file)
@@ -182,8 +182,10 @@ shared_ptr<Frame> decode_jpeg(const string &jpeg)
        }
 
        if (!error_mgr.run([&dinfo, &y_pix, &cb_pix, &cr_pix, pitch_y, pitch_chroma, v_mcu_size, mcu_height_blocks] {
        }
 
        if (!error_mgr.run([&dinfo, &y_pix, &cb_pix, &cr_pix, pitch_y, pitch_chroma, v_mcu_size, mcu_height_blocks] {
-                   JSAMPROW yptr[v_mcu_size], cbptr[v_mcu_size], crptr[v_mcu_size];
-                   JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+                   unique_ptr<JSAMPROW[]> yptr(new JSAMPROW[v_mcu_size]);
+                   unique_ptr<JSAMPROW[]> cbptr(new JSAMPROW[v_mcu_size]);
+                   unique_ptr<JSAMPROW[]> crptr(new JSAMPROW[v_mcu_size]);
+                   JSAMPARRAY data[3] = { yptr.get(), cbptr.get(), crptr.get() };
                    for (unsigned y = 0; y < mcu_height_blocks; ++y) {
                            // NOTE: The last elements of cbptr/crptr will be unused for vertically subsampled chroma.
                            for (unsigned yy = 0; yy < v_mcu_size; ++yy) {
                    for (unsigned y = 0; y < mcu_height_blocks; ++y) {
                            // NOTE: The last elements of cbptr/crptr will be unused for vertically subsampled chroma.
                            for (unsigned yy = 0; yy < v_mcu_size; ++yy) {
index 5ee873be9cce81e5fb19009009684987558b1273..a9bfb6da25147f2ff01f067b928695f62e8ed480 100644 (file)
@@ -71,9 +71,9 @@ Analyzer::Analyzer()
        signal_changed();
        ui->grabbed_frame_label->installEventFilter(this);
 
        signal_changed();
        ui->grabbed_frame_label->installEventFilter(this);
 
-        glGenBuffers(1, &pbo);
-        glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, pbo);
-        glBufferData(GL_PIXEL_PACK_BUFFER_ARB, global_flags.width * global_flags.height * 4, nullptr, GL_STREAM_READ);
+       glGenBuffers(1, &pbo);
+       glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, pbo);
+       glBufferData(GL_PIXEL_PACK_BUFFER_ARB, global_flags.width * global_flags.height * 4, nullptr, GL_STREAM_READ);
 }
 
 Analyzer::~Analyzer()
 }
 
 Analyzer::~Analyzer()
@@ -238,7 +238,7 @@ bool Analyzer::eventFilter(QObject *watched, QEvent *event)
                ui->blue_label->setText(u8"—");
                ui->hex_label->setText(u8"#—");
        }
                ui->blue_label->setText(u8"—");
                ui->hex_label->setText(u8"#—");
        }
-        return false;
+       return false;
 }
 
 void Analyzer::grab_pixel(int x, int y)
 }
 
 void Analyzer::grab_pixel(int x, int y)
index 13f0e30e3e9162fd5689883290d448205ae07e22..bb6c17be145f71e04913fd23811c217f31c90738 100644 (file)
@@ -212,7 +212,7 @@ void AV1Encoder::init_av1()
                global_headers = string(reinterpret_cast<const char *>(header->p_buffer), header->n_filled_len);
 
                svt_av1_enc_stream_header_release(header);  // Don't care about errors.
                global_headers = string(reinterpret_cast<const char *>(header->p_buffer), header->n_filled_len);
 
                svt_av1_enc_stream_header_release(header);  // Don't care about errors.
-          }
+       }
 }
 
 void AV1Encoder::encoder_thread_func()
 }
 
 void AV1Encoder::encoder_thread_func()
index 935295b3bee3ac8996e3122ac0387345f8654738..9eefc73ed4e2ca31220f5a8737688e4774c944f9 100644 (file)
@@ -51,9 +51,9 @@ void reset_lcgrand()
 
 void callback(float level_lufs, float peak_db,
               std::vector<AudioMixer::BusLevel> bus_levels,
 
 void callback(float level_lufs, float peak_db,
               std::vector<AudioMixer::BusLevel> bus_levels,
-             float global_level_lufs, float range_low_lufs, float range_high_lufs,
-             float final_makeup_gain_db,
-             float correlation)
+              float global_level_lufs, float range_low_lufs, float range_high_lufs,
+              float final_makeup_gain_db,
+              float correlation)
 {
        // Empty.
 }
 {
        // Empty.
 }
diff --git a/nageru/context.h b/nageru/context.h
deleted file mode 100644 (file)
index 13dbf24..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-
-// Needs to be in its own file because Qt and libepoxy seemingly don't coexist well
-// within the same file.
-
-class QSurface;
-class QOpenGLContext;
-class QSurfaceFormat;
-class QGLWidget;
-
-extern bool using_egl;
-extern QGLWidget *global_share_widget;
-QSurface *create_surface(const QSurfaceFormat &format);
-QSurface *create_surface_with_same_format(const QSurface *surface);
-QOpenGLContext *create_context(const QSurface *surface);
-bool make_current(QOpenGLContext *context, QSurface *surface);
-void delete_context(QOpenGLContext *context);
index 6be16b88b0ad53ebc840afdcb9aabc415ad16610..0fa4749cbba3a849a0f2b329b7849dec0f3ba30e 100644 (file)
@@ -28,7 +28,7 @@ using namespace std;
 
 CorrelationMeasurer::CorrelationMeasurer(unsigned sample_rate,
                                          float lowpass_cutoff_hz,
 
 CorrelationMeasurer::CorrelationMeasurer(unsigned sample_rate,
                                          float lowpass_cutoff_hz,
-                                        float falloff_seconds)
+                                         float falloff_seconds)
     : w1(2.0 * M_PI * lowpass_cutoff_hz / sample_rate),
       w2(1.0 / (falloff_seconds * sample_rate))
 {
     : w1(2.0 * M_PI * lowpass_cutoff_hz / sample_rate),
       w2(1.0 / (falloff_seconds * sample_rate))
 {
index e239ec964f6f70d17fc13c24494ead77e7d726b4..90d86b15dd580f0fe73d96148fa4f728ff280c9f 100644 (file)
@@ -256,7 +256,14 @@ HRESULT STDMETHODCALLTYPE DeckLinkCapture::VideoInputFrameArrived(
                        assert(stride == width * 2);
                }
 
                        assert(stride == width * 2);
                }
 
-               current_video_frame = video_frame_allocator->create_frame(width, height, stride);
+               if (width * stride > FRAME_SIZE) {
+                       // TODO: If we had an OpenGL context here, calling create_frame()
+                       // would be completely fine.
+                       fprintf(stderr, "Card %u: Captured frame %d x %d (stride %d) would be larger than supported frame size (%d > %d), skipping.\n",
+                               card_index, width, height, stride, width * stride, FRAME_SIZE);
+               } else {
+                       current_video_frame = video_frame_allocator->create_frame(width, height, stride);
+               }
                if (current_video_frame.data != nullptr) {
                        const uint8_t *src;
                        video_frame->GetBytes((void **)&src);
                if (current_video_frame.data != nullptr) {
                        const uint8_t *src;
                        video_frame->GetBytes((void **)&src);
index 09743a5abe5e72a9a8c12487c28d4fd756daeb5f..369c9b8ba504845a6930738846289bc491a0e52c 100644 (file)
@@ -6,7 +6,18 @@
 // #define MAX_VIDEO_CARDS 16  // defined in shared_defs.h.
 #define MAX_ALSA_CARDS 16
 #define MAX_BUSES 256  // Audio buses.
 // #define MAX_VIDEO_CARDS 16  // defined in shared_defs.h.
 #define MAX_ALSA_CARDS 16
 #define MAX_BUSES 256  // Audio buses.
-#define FRAME_SIZE (8 << 20)  // 8 MB. (FIXME: Not enough for a 2160p frame!)
+
+// FRAME_SIZE is the default frame size, in bytes. FFmpeg inputs (video files and SRT streams)
+// can allocate larger frames as needed; USB and DeckLink outputs always use FRAME_SIZE.
+// We should eventually add support for at least DeckLink outputs, allowing us to capture
+// 2160p frames. Also, it would allow us to lower the default frame size to the maximum
+// bmusb supports (2 MB just about covers 1080i 4:2:2, then add some for 10-bit?) to waste
+// less memory.
+//
+// As a general sanity check, we also have a MAX_FRAME_SIZE that even dynamic allocation
+// will not go past.
+#define FRAME_SIZE (8 << 20)  // 8 MB (just enough for 1080p RGBA).
+#define MAX_FRAME_SIZE (140 << 20)  // 140 MB; enough for 8192*4320 RGBA and then some.
 
 // For deinterlacing. See also comments on InputState.
 #define FRAME_HISTORY_LENGTH 5
 
 // For deinterlacing. See also comments on InputState.
 #define FRAME_HISTORY_LENGTH 5
index 6dbcbd602b79b544c8319ddd9446b9992c66ef10..14ad91f0c64baa01b686680baada50d7b5d96977 100644 (file)
@@ -3,8 +3,8 @@
 #include "shared/shared_defs.h"
 
 #include <assert.h>
 #include "shared/shared_defs.h"
 
 #include <assert.h>
-#include <cerrno>
-#include <ctime>
+#include <errno.h>
+#include <epoxy/egl.h>
 #include <limits>
 #include <map>
 #include <memory>
 #include <limits>
 #include <map>
 #include <memory>
@@ -13,6 +13,7 @@
 #include <movit/ycbcr.h>
 #include <mutex>
 #include <pthread.h>
 #include <movit/ycbcr.h>
 #include <mutex>
 #include <pthread.h>
+#include <time.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -20,6 +21,7 @@
 #include <string>
 #include <sys/stat.h>
 #include <thread>
 #include <string>
 #include <sys/stat.h>
 #include <thread>
+#include <QSurface>
 
 extern "C" {
 #include <libavcodec/avcodec.h>
 
 extern "C" {
 #include <libavcodec/avcodec.h>
@@ -56,6 +58,7 @@ extern "C" {
 #include <movit/colorspace_conversion_effect.h>
 
 #include "bmusb/bmusb.h"
 #include <movit/colorspace_conversion_effect.h>
 
 #include "bmusb/bmusb.h"
+#include "shared/context.h"
 #include "shared/ffmpeg_raii.h"
 #include "ffmpeg_util.h"
 #include "flags.h"
 #include "shared/ffmpeg_raii.h"
 #include "ffmpeg_util.h"
 #include "flags.h"
@@ -283,8 +286,8 @@ RGBTriplet get_neutral_color(AVDictionary *metadata)
 
 }  // namespace
 
 
 }  // namespace
 
-FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
-       : filename(filename), width(width), height(height), video_timebase{1, 1}
+FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height, QSurface *surface)
+       : filename(filename), width(width), height(height), video_timebase{1, 1}, surface(surface)
 {
        description = "Video: " + filename;
 
 {
        description = "Video: " + filename;
 
@@ -294,12 +297,13 @@ FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned he
 }
 
 #ifdef HAVE_SRT
 }
 
 #ifdef HAVE_SRT
-FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
+FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id, QSurface *surface)
        : srt_sock(srt_sock),
          width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
          height(0),
          pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
        : srt_sock(srt_sock),
          width(0),  // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
          height(0),
          pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
-         video_timebase{1, 1}
+         video_timebase{1, 1},
+         surface(surface)
 {
        if (stream_id.empty()) {
                description = "SRT stream";
 {
        if (stream_id.empty()) {
                description = "SRT stream";
@@ -323,6 +327,7 @@ FFmpegCapture::~FFmpegCapture()
                srt_close(srt_sock);
        }
 #endif
                srt_close(srt_sock);
        }
 #endif
+       delete surface;
 }
 
 void FFmpegCapture::configure_card()
 }
 
 void FFmpegCapture::configure_card()
@@ -383,6 +388,21 @@ void FFmpegCapture::producer_thread_func()
        snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
        pthread_setname_np(pthread_self(), thread_name);
 
        snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
        pthread_setname_np(pthread_self(), thread_name);
 
+       // We need a context in case create_frame() needs to reallocate something.
+       // (If none is given, we are probably in Kaeru, which uses MallocFrameAllocator
+       // anyway, which doesn't reallocate currently and definitely doesn't need
+       // an active OpenGL context to do so.)
+       QOpenGLContext *context = nullptr;
+       if (surface != nullptr) {
+               context = create_context(this->surface);
+               eglBindAPI(EGL_OPENGL_API);
+               if (!make_current(context, this->surface)) {
+                       printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
+                               eglGetError());
+                       abort();
+               }
+       }
+
        while (!producer_thread_should_quit.should_quit()) {
                string filename_copy;
                {
        while (!producer_thread_should_quit.should_quit()) {
                string filename_copy;
                {
@@ -426,9 +446,11 @@ void FFmpegCapture::producer_thread_func()
        }
 
        if (has_dequeue_callbacks) {
        }
 
        if (has_dequeue_callbacks) {
-                dequeue_cleanup_callback();
+               dequeue_cleanup_callback();
                has_dequeue_callbacks = false;
                has_dequeue_callbacks = false;
-        }
+       }
+
+       delete_context(context);
 }
 
 void FFmpegCapture::send_disconnected_frame()
 }
 
 void FFmpegCapture::send_disconnected_frame()
@@ -1094,9 +1116,9 @@ void FFmpegCapture::convert_audio(const AVFrame *audio_avframe, FrameAllocator::
        int out_samples = swr_convert(resampler, &data, num_samples_room,
                const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
        if (out_samples < 0) {
        int out_samples = swr_convert(resampler, &data, num_samples_room,
                const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
        if (out_samples < 0) {
-                fprintf(stderr, "Audio conversion failed.\n");
-                abort();
-        }
+               fprintf(stderr, "Audio conversion failed.\n");
+               abort();
+       }
 
        audio_frame->len += out_samples * bytes_per_sample;
 }
 
        audio_frame->len += out_samples * bytes_per_sample;
 }
@@ -1125,7 +1147,7 @@ UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &
 {
        *error = false;
 
 {
        *error = false;
 
-       UniqueFrame video_frame(video_frame_allocator->alloc_frame());
+       UniqueFrame video_frame(video_frame_allocator->create_frame(frame->width, frame->height, frame->width));
        if (video_frame->data == nullptr) {
                return video_frame;
        }
        if (video_frame->data == nullptr) {
                return video_frame;
        }
@@ -1190,8 +1212,8 @@ UniqueFrame FFmpegCapture::make_video_frame(const AVFrame *frame, const string &
        // FIXME: Currently, if the video is too high-res for one of the allocated
        // frames, we simply refuse to scale it here to avoid crashes. It would be better
        // if we could somehow signal getting larger frames, especially as 4K is a thing now.
        // FIXME: Currently, if the video is too high-res for one of the allocated
        // frames, we simply refuse to scale it here to avoid crashes. It would be better
        // if we could somehow signal getting larger frames, especially as 4K is a thing now.
-       if (video_frame->len > FRAME_SIZE) {
-               fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
+       if (video_frame->len > video_frame->size) {
+               fprintf(stderr, "%s: Decoded frame would be larger than supported frame size (%zu > %zu), not decoding.\n", pathname.c_str(), video_frame->len, video_frame->size);
                *error = true;
                return video_frame;
        }
                *error = true;
                return video_frame;
        }
index 122bf86726ba4cb6d73a70a0ca81166d6e7d7431..b974583a10ef22d3f339859513d74c4797bcb673 100644 (file)
@@ -60,14 +60,15 @@ struct AVFormatContext;
 struct AVFrame;
 struct AVRational;
 struct AVPacket;
 struct AVFrame;
 struct AVRational;
 struct AVPacket;
+class QSurface;
 
 class FFmpegCapture : public bmusb::CaptureInterface
 {
 public:
 
 class FFmpegCapture : public bmusb::CaptureInterface
 {
 public:
-       FFmpegCapture(const std::string &filename, unsigned width, unsigned height);
+       FFmpegCapture(const std::string &filename, unsigned width, unsigned height, QSurface *surface);
 #ifdef HAVE_SRT
        // Takes ownership of the SRT client socket.
 #ifdef HAVE_SRT
        // Takes ownership of the SRT client socket.
-       FFmpegCapture(int srt_sock, const std::string &stream_id);
+       FFmpegCapture(int srt_sock, const std::string &stream_id, QSurface *surface);
 #endif
        ~FFmpegCapture();
 
 #endif
        ~FFmpegCapture();
 
@@ -351,6 +352,7 @@ private:
        // -1 is strictly speaking outside the range of the enum, but hopefully, it will be alright.
        AVColorSpace last_colorspace = static_cast<AVColorSpace>(-1);
        AVChromaLocation last_chroma_location = static_cast<AVChromaLocation>(-1);
        // -1 is strictly speaking outside the range of the enum, but hopefully, it will be alright.
        AVColorSpace last_colorspace = static_cast<AVColorSpace>(-1);
        AVChromaLocation last_chroma_location = static_cast<AVChromaLocation>(-1);
+       QSurface *const surface;
 };
 
 #endif  // !defined(_FFMPEG_CAPTURE_H)
 };
 
 #endif  // !defined(_FFMPEG_CAPTURE_H)
index 835ec6c887b191cd13a28ed02d00a823dd45633b..e4669515fab73d0298d83b8fd303d876f64da5ac 100644 (file)
@@ -23,8 +23,8 @@ using namespace std;
 #else  // !defined(__SSE__)
 
 union uint_float {
 #else  // !defined(__SSE__)
 
 union uint_float {
-        float f;
-        unsigned int i;
+       float f;
+       unsigned int i;
 };
 #define early_undenormalise(sample) { \
         uint_float uf; \
 };
 #define early_undenormalise(sample) { \
         uint_float uf; \
index 2fe7e34ce87e8b2bb6130c05c61480bf878648ed..5f65d566e624578f9099bb3efbd414325af0e0db 100644 (file)
@@ -274,7 +274,7 @@ void InputMappingDialog::remove_clicked()
 void InputMappingDialog::updown_clicked(int direction)
 {
        assert(ui->table->selectedRanges().size() == 1);
 void InputMappingDialog::updown_clicked(int direction)
 {
        assert(ui->table->selectedRanges().size() == 1);
-       const QTableWidgetSelectionRange &range = ui->table->selectedRanges()[0];
+       QTableWidgetSelectionRange range = ui->table->selectedRanges()[0];
        int a_row = range.bottomRow();
        int b_row = range.bottomRow() + direction;
 
        int a_row = range.bottomRow();
        int b_row = range.bottomRow() + direction;
 
index 2bdc6861f2dffa5a17a0d81922e38adcbaf0b95b..caa71c6527fc3bd0cdde79dc88cc9154bae34e5b 100644 (file)
@@ -267,7 +267,7 @@ int main(int argc, char *argv[])
        }
        global_x264_encoder = x264_encoder.get();
 
        }
        global_x264_encoder = x264_encoder.get();
 
-       FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
+       FFmpegCapture video(argv[optind], global_flags.width, global_flags.height, /*surface=*/nullptr);
        video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
        if (global_flags.transcode_video) {
                video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
        video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
        if (global_flags.transcode_video) {
                video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
index 2fd6dbb9ab04c11e72fb9c8efb1f97125c128f74..e7b9068ef500b958e2af39e28a8aa33480e2f100 100644 (file)
@@ -49,7 +49,7 @@ void MIDIMapper::set_midi_mapping(const MIDIMappingProto &new_mapping)
        }
 
        num_controller_banks = min(max(mapping_proto->num_controller_banks(), 1), 5);
        }
 
        num_controller_banks = min(max(mapping_proto->num_controller_banks(), 1), 5);
-        current_controller_bank = 0;
+       current_controller_bank = 0;
 
        receiver->clear_all_highlights();
        update_highlights();
 
        receiver->clear_all_highlights();
        update_highlights();
index 76776d2205ad454a92fb65dc7e53d1697d6eceb6..2fa8fbe891a7f69abdbb4b0686e5a82908eb70db 100644 (file)
@@ -158,7 +158,7 @@ int get_light_mapping(const MIDIMappingProto &mapping_proto, size_t bus_idx, int
 
 MIDIMappingDialog::MIDIMappingDialog(MIDIMapper *mapper)
        : ui(new Ui::MIDIMappingDialog),
 
 MIDIMappingDialog::MIDIMappingDialog(MIDIMapper *mapper)
        : ui(new Ui::MIDIMappingDialog),
-          mapper(mapper)
+         mapper(mapper)
 {
        ui->setupUi(this);
 
 {
        ui->setupUi(this);
 
index b3e60feba560a261d25e63b2fc2fc42c99505bc3..b986ed5fd8c28698346c279d6ebd1108c40539cf 100644 (file)
@@ -409,7 +409,7 @@ Mixer::Mixer(const QSurfaceFormat &format)
        }
 
        // Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
        }
 
        // Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
-       theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get()));
+       theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), create_surface(format)));
 
        // Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
        std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
 
        // Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
        std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
@@ -646,9 +646,6 @@ void Mixer::configure_card(unsigned card_index, CaptureInterface *capture, CardT
                        card->frame_allocator->reconfigure(pixel_format, FRAME_SIZE, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get());
                }
                card->capture->set_video_frame_allocator(card->frame_allocator.get());
                        card->frame_allocator->reconfigure(pixel_format, FRAME_SIZE, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get());
                }
                card->capture->set_video_frame_allocator(card->frame_allocator.get());
-               if (card->surface == nullptr) {
-                       card->surface = create_surface_with_same_format(mixer_surface);
-               }
                while (!card->new_frames.empty()) card->new_frames.pop_front();
                card->last_timecode = -1;
                card->capture->set_pixel_format(pixel_format);
                while (!card->new_frames.empty()) card->new_frames.pop_front();
                card->last_timecode = -1;
                card->capture->set_pixel_format(pixel_format);
@@ -812,7 +809,7 @@ int unwrap_timecode(uint16_t current_wrapped, int last)
 
 void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                      FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
 
 void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
                      FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
-                    FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
+                     FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
 {
        DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
        CaptureCard *card = &cards[card_index];
 {
        DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
        CaptureCard *card = &cards[card_index];
@@ -1613,7 +1610,7 @@ void Mixer::handle_hotplugged_cards()
                                fprintf(stderr, "New SRT stream connected (%s), choosing slot %d.\n", stream_id.c_str(), free_card_index);
                        }
                        CaptureCard *card = &cards[free_card_index];
                                fprintf(stderr, "New SRT stream connected (%s), choosing slot %d.\n", stream_id.c_str(), free_card_index);
                        }
                        CaptureCard *card = &cards[free_card_index];
-                       FFmpegCapture *capture = new FFmpegCapture(sock, stream_id);
+                       FFmpegCapture *capture = new FFmpegCapture(sock, stream_id, create_surface_with_same_format(mixer_surface));
                        capture->set_card_index(free_card_index);
                        configure_card(free_card_index, capture, CardType::FFMPEG_INPUT, /*output=*/nullptr, /*is_srt_card=*/true);
                        card->srt_metrics.update_srt_stats(sock);  // Initial zero stats.
                        capture->set_card_index(free_card_index);
                        configure_card(free_card_index, capture, CardType::FFMPEG_INPUT, /*output=*/nullptr, /*is_srt_card=*/true);
                        card->srt_metrics.update_srt_stats(sock);  // Initial zero stats.
index 5549a245064d613a0632cdd3e8b4786a2fcde8ff..c42d8f1a44c4f0a104201c2c8d631db317f73d33 100644 (file)
@@ -478,9 +478,6 @@ private:
 
                std::unique_ptr<PBOFrameAllocator> frame_allocator;
 
 
                std::unique_ptr<PBOFrameAllocator> frame_allocator;
 
-               // Stuff for the OpenGL context (for texture uploading).
-               QSurface *surface = nullptr;
-
                struct NewFrame {
                        RefCountedFrame frame;
                        int64_t length;  // In TIMEBASE units.
                struct NewFrame {
                        RefCountedFrame frame;
                        int64_t length;  // In TIMEBASE units.
index b74c04b1988aa728b2c7d50ee7f50db9e84f2479..70b9afe65882ad15e7633b0b8fed49efd7737f42 100644 (file)
@@ -18,6 +18,7 @@ void NageruCefApp::OnBeforeCommandLineProcessing(
        command_line->AppendSwitch("disable-gpu");
        command_line->AppendSwitch("disable-gpu-compositing");
        command_line->AppendSwitch("enable-begin-frame-scheduling");
        command_line->AppendSwitch("disable-gpu");
        command_line->AppendSwitch("disable-gpu-compositing");
        command_line->AppendSwitch("enable-begin-frame-scheduling");
+       command_line->AppendSwitchWithValue("autoplay-policy", "no-user-gesture-required");
 
        // https://bitbucket.org/chromiumembedded/cef/issues/2717/xmlhttprequest-empty-responsetext
        command_line->AppendSwitch("disable-web-security");
 
        // https://bitbucket.org/chromiumembedded/cef/issues/2717/xmlhttprequest-empty-responsetext
        command_line->AppendSwitch("disable-web-security");
index 709a2bf359e505ab7e089f567fce33b502427834..6ebe13ae4d6f731e7348dc5f6dbcd0e61fefaedb 100644 (file)
@@ -13,6 +13,7 @@
 #include <va/va.h>
 
 #include "mjpeg_encoder.h"
 #include <va/va.h>
 
 #include "mjpeg_encoder.h"
+#include "defs.h"
 #include "shared/va_resource_pool.h"
 #include "v210_converter.h"
 #include "shared/va_display.h"
 #include "shared/va_resource_pool.h"
 #include "v210_converter.h"
 #include "shared/va_display.h"
@@ -34,7 +35,7 @@ void set_clamp_to_edge()
 }  // namespace
 
 PBOFrameAllocator::PBOFrameAllocator(bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, unsigned card_index, MJPEGEncoder *mjpeg_encoder, size_t num_queued_frames, GLenum buffer, GLenum permissions, GLenum map_bits)
 }  // namespace
 
 PBOFrameAllocator::PBOFrameAllocator(bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, unsigned card_index, MJPEGEncoder *mjpeg_encoder, size_t num_queued_frames, GLenum buffer, GLenum permissions, GLenum map_bits)
-        : card_index(card_index),
+       : card_index(card_index),
          mjpeg_encoder(mjpeg_encoder),
          pixel_format(pixel_format),
          buffer(buffer),
          mjpeg_encoder(mjpeg_encoder),
          pixel_format(pixel_format),
          buffer(buffer),
@@ -47,7 +48,9 @@ PBOFrameAllocator::PBOFrameAllocator(bmusb::PixelFormat pixel_format, size_t fra
 {
        userdata.reset(new Userdata[num_queued_frames]);
        for (size_t i = 0; i < num_queued_frames; ++i) {
 {
        userdata.reset(new Userdata[num_queued_frames]);
        for (size_t i = 0; i < num_queued_frames; ++i) {
-               init_frame(i, frame_size, width, height, permissions, map_bits, generation);
+               Frame frame;
+               init_frame(frame, &userdata[i], this, pixel_format, frame_size, width, height, permissions, map_bits, buffer, generation);
+               freelist.push(frame);
        }
        glBindBuffer(buffer, 0);
        check_error();
        }
        glBindBuffer(buffer, 0);
        check_error();
@@ -55,7 +58,7 @@ PBOFrameAllocator::PBOFrameAllocator(bmusb::PixelFormat pixel_format, size_t fra
        check_error();
 }
 
        check_error();
 }
 
-void PBOFrameAllocator::init_frame(size_t frame_idx, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, int generation)
+void PBOFrameAllocator::init_frame(Frame &frame, Userdata *ud, PBOFrameAllocator *owner, bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, GLenum buffer, int generation)
 {
        GLuint pbo;
        glGenBuffers(1, &pbo);
 {
        GLuint pbo;
        glGenBuffers(1, &pbo);
@@ -65,18 +68,16 @@ void PBOFrameAllocator::init_frame(size_t frame_idx, size_t frame_size, GLuint w
        glBufferStorage(buffer, frame_size, nullptr, permissions | GL_MAP_PERSISTENT_BIT);
        check_error();
 
        glBufferStorage(buffer, frame_size, nullptr, permissions | GL_MAP_PERSISTENT_BIT);
        check_error();
 
-       Frame frame;
        frame.data = (uint8_t *)glMapBufferRange(buffer, 0, frame_size, permissions | map_bits | GL_MAP_PERSISTENT_BIT);
        frame.data2 = frame.data + frame_size / 2;
        check_error();
        frame.size = frame_size;
        frame.data = (uint8_t *)glMapBufferRange(buffer, 0, frame_size, permissions | map_bits | GL_MAP_PERSISTENT_BIT);
        frame.data2 = frame.data + frame_size / 2;
        check_error();
        frame.size = frame_size;
-       Userdata *ud = &userdata[frame_idx];
        frame.userdata = ud;
        ud->generation = generation;
        ud->pbo = pbo;
        ud->pixel_format = pixel_format;
        ud->data_copy_malloc = new uint8_t[frame_size];
        frame.userdata = ud;
        ud->generation = generation;
        ud->pbo = pbo;
        ud->pixel_format = pixel_format;
        ud->data_copy_malloc = new uint8_t[frame_size];
-       frame.owner = this;
+       frame.owner = owner;
 
        // For 8-bit non-planar Y'CbCr, we ask the driver to split Y' and Cb/Cr
        // into separate textures. For 10-bit, the input format (v210)
 
        // For 8-bit non-planar Y'CbCr, we ask the driver to split Y' and Cb/Cr
        // into separate textures. For 10-bit, the input format (v210)
@@ -214,8 +215,6 @@ void PBOFrameAllocator::init_frame(size_t frame_idx, size_t frame_size, GLuint w
                        assert(false);
                }
        }
                        assert(false);
                }
        }
-
-       freelist.push(frame);
 }
 
 PBOFrameAllocator::~PBOFrameAllocator()
 }
 
 PBOFrameAllocator::~PBOFrameAllocator()
@@ -282,7 +281,7 @@ void PBOFrameAllocator::destroy_frame(Frame *frame)
 
 bmusb::FrameAllocator::Frame PBOFrameAllocator::alloc_frame()
 {
 
 bmusb::FrameAllocator::Frame PBOFrameAllocator::alloc_frame()
 {
-        Frame vf;
+       Frame vf;
 
        lock_guard<mutex> lock(freelist_mutex);  // Meh.
        if (freelist.empty()) {
 
        lock_guard<mutex> lock(freelist_mutex);  // Meh.
        if (freelist.empty()) {
@@ -310,7 +309,12 @@ bmusb::FrameAllocator::Frame PBOFrameAllocator::alloc_frame()
 
 bmusb::FrameAllocator::Frame PBOFrameAllocator::create_frame(size_t width, size_t height, size_t stride)
 {
 
 bmusb::FrameAllocator::Frame PBOFrameAllocator::create_frame(size_t width, size_t height, size_t stride)
 {
-        Frame vf;
+       Frame vf;
+
+       size_t desired_frame_bytes = width * stride;
+       if (stride > 8192 * 4 || height > 8192 || desired_frame_bytes > MAX_FRAME_SIZE) {
+               return vf;
+       }
 
        {
                lock_guard<mutex> lock(freelist_mutex);
 
        {
                lock_guard<mutex> lock(freelist_mutex);
@@ -324,10 +328,22 @@ bmusb::FrameAllocator::Frame PBOFrameAllocator::create_frame(size_t width, size_
                        freelist.pop();
                }
        }
                        freelist.pop();
                }
        }
-       vf.len = 0;
-       vf.overflow = 0;
 
        Userdata *userdata = (Userdata *)vf.userdata;
 
        Userdata *userdata = (Userdata *)vf.userdata;
+       assert(generation == userdata->generation);
+       if (vf.size < desired_frame_bytes || (vf.size > FRAME_SIZE && vf.size > desired_frame_bytes * 2)) {
+               // Frame is either too small or way too large, so reallocate it.
+               // Note that width and height now automatically becomes the right size
+               // (the one we just asked for, instead of the default for the allocator,
+               // which is generally the global resolution); it doesn't matter
+               // for correctness, since we'll recreate the texture on upload if needed,
+               // but it is nice to save that step.
+               destroy_frame(&vf);
+               init_frame(vf, userdata, this, pixel_format, std::max<size_t>(desired_frame_bytes, FRAME_SIZE), width, height, permissions, map_bits, buffer, generation);
+       };
+
+       vf.len = 0;
+       vf.overflow = 0;
 
        if (mjpeg_encoder != nullptr &&
            mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
 
        if (mjpeg_encoder != nullptr &&
            mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
@@ -465,7 +481,9 @@ void PBOFrameAllocator::reconfigure(bmusb::PixelFormat pixel_format,
 
        userdata.reset(new Userdata[num_queued_frames]);
        for (size_t i = 0; i < num_queued_frames; ++i) {
 
        userdata.reset(new Userdata[num_queued_frames]);
        for (size_t i = 0; i < num_queued_frames; ++i) {
-               init_frame(i, frame_size, width, height, permissions, map_bits, generation);
+               Frame frame;
+               init_frame(frame, &userdata[i], this, pixel_format, frame_size, width, height, permissions, map_bits, buffer, generation);
+               freelist.push(frame);
        }
 
        // There may still be frames out with the old configuration
        }
 
        // There may still be frames out with the old configuration
index cff50aa6310036fe320f2f2021c9b445ec7e7a75..772d05e2ef257425a6a41d6e6f24918d8b44b239 100644 (file)
@@ -101,7 +101,7 @@ public:
        };
 
 private:
        };
 
 private:
-       void init_frame(size_t frame_idx, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, int generation);
+       static void init_frame(Frame &frame, Userdata *ud, PBOFrameAllocator *owner, bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, GLenum buffer, int generation);
        void destroy_frame(Frame *frame);
 
        unsigned card_index;
        void destroy_frame(Frame *frame);
 
        unsigned card_index;
index a7756e4a8ca288f445e2f0af91c3d02c299ee2df..94a8e16161c6da646aa6281c636ab2873efe24cb 100644 (file)
@@ -44,8 +44,8 @@ ResamplingQueue::ResamplingQueue(const std::string &debug_description, unsigned
 
        // Prime the resampler so there's no more delay.
        vresampler.inp_count = vresampler.inpsize() / 2 - 1;
 
        // Prime the resampler so there's no more delay.
        vresampler.inp_count = vresampler.inpsize() / 2 - 1;
-        vresampler.out_count = 1048576;
-        vresampler.process ();
+       vresampler.out_count = 1048576;
+       vresampler.process();
 }
 
 void ResamplingQueue::add_input_samples(steady_clock::time_point ts, const float *samples, ssize_t num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
 }
 
 void ResamplingQueue::add_input_samples(steady_clock::time_point ts, const float *samples, ssize_t num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
index 06d147f6686bb07739d7cbb5ee6bafa1ed1f66d5..192b27eabbe981581283cc6e111dddd7345134ff 100644 (file)
@@ -54,6 +54,7 @@
 #include "mainwindow.h"
 #include "pbo_frame_allocator.h"
 #include "scene.h"
 #include "mainwindow.h"
 #include "pbo_frame_allocator.h"
 #include "scene.h"
+#include "shared/context.h"
 
 class Mixer;
 
 
 class Mixer;
 
@@ -123,9 +124,9 @@ InputStateInfo::InputStateInfo(const InputState &input_state)
 // An effect that does nothing.
 class IdentityEffect : public Effect {
 public:
 // An effect that does nothing.
 class IdentityEffect : public Effect {
 public:
-        IdentityEffect() {}
-        string effect_type_id() const override { return "IdentityEffect"; }
-        string output_fragment_shader() override { return read_file("identity.frag"); }
+       IdentityEffect() {}
+       string effect_type_id() const override { return "IdentityEffect"; }
+       string output_fragment_shader() override { return read_file("identity.frag"); }
 };
 
 Effect *instantiate_effect(EffectChain *chain, EffectType effect_type)
 };
 
 Effect *instantiate_effect(EffectChain *chain, EffectType effect_type)
@@ -450,6 +451,9 @@ int ImageInput_new(lua_State* L)
        return wrap_lua_object_nonowned<ImageInput>(L, "ImageInput", filename);
 }
 
        return wrap_lua_object_nonowned<ImageInput>(L, "ImageInput", filename);
 }
 
+}  // namespace
+
+// Must be non-namespaced due to friend declaration.
 int VideoInput_new(lua_State* L)
 {
        assert(lua_gettop(L) == 2);
 int VideoInput_new(lua_State* L)
 {
        assert(lua_gettop(L) == 2);
@@ -460,17 +464,19 @@ int VideoInput_new(lua_State* L)
                print_warning(L, "Invalid enum %d used for video format, choosing Y'CbCr.\n", pixel_format);
                pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
        }
                print_warning(L, "Invalid enum %d used for video format, choosing Y'CbCr.\n", pixel_format);
                pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
        }
-       int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
+       Theme *theme = get_theme_updata(L);
+       int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height, create_surface_with_same_format(theme->surface));
        if (ret == 1) {
                FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
                (*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
 
        if (ret == 1) {
                FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
                (*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
 
-               Theme *theme = get_theme_updata(L);
                theme->register_video_input(*capture);
        }
        return ret;
 }
 
                theme->register_video_input(*capture);
        }
        return ret;
 }
 
+namespace {
+
 int VideoInput_rewind(lua_State* L)
 {
        assert(lua_gettop(L) == 1);
 int VideoInput_rewind(lua_State* L)
 {
        assert(lua_gettop(L) == 1);
@@ -1513,15 +1519,15 @@ int Nageru_set_audio_bus_eq_level_db(lua_State *L)
        return 0;
 }
 
        return 0;
 }
 
-Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool)
-       : resource_pool(resource_pool), signal_to_card_mapping(global_flags.default_stream_mapping)
+Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool, QSurface *surface)
+       : resource_pool(resource_pool), signal_to_card_mapping(global_flags.default_stream_mapping), surface(surface)
 {
        // Defaults.
        channel_names[0] = "Live";
        channel_names[1] = "Preview";
 
        L = luaL_newstate();
 {
        // Defaults.
        channel_names[0] = "Live";
        channel_names[1] = "Preview";
 
        L = luaL_newstate();
-        luaL_openlibs(L);
+       luaL_openlibs(L);
 
        // Search through all directories until we find a file that will load
        // (as in, does not return LUA_ERRFILE); then run it. We store load errors
 
        // Search through all directories until we find a file that will load
        // (as in, does not return LUA_ERRFILE); then run it. We store load errors
@@ -1631,6 +1637,7 @@ Theme::~Theme()
 {
        theme_menu.reset();
        lua_close(L);
 {
        theme_menu.reset();
        lua_close(L);
+       // Leak the surface.
 }
 
 void Theme::register_globals()
 }
 
 void Theme::register_globals()
index 7fe0e8585a2b0993a359d362d6db95b08951d5ba..2c6021e4e103be0fddbebeb4145c24ddca37f3b4 100644 (file)
@@ -24,6 +24,7 @@ class Scene;
 class CEFCapture;
 class FFmpegCapture;
 class LiveInputWrapper;
 class CEFCapture;
 class FFmpegCapture;
 class LiveInputWrapper;
+class QSurface;
 struct InputState;
 
 namespace movit {
 struct InputState;
 
 namespace movit {
@@ -89,7 +90,7 @@ struct InputStateInfo {
 
 class Theme {
 public:
 
 class Theme {
 public:
-       Theme(const std::string &filename, const std::vector<std::string> &search_dirs, movit::ResourcePool *resource_pool);
+       Theme(const std::string &filename, const std::vector<std::string> &search_dirs, movit::ResourcePool *resource_pool, QSurface *surface);
        ~Theme();
 
        struct Chain {
        ~Theme();
 
        struct Chain {
@@ -244,6 +245,11 @@ private:
        std::map<unsigned, int> channel_signals;  // Set using Nageru.set_channel_signal(). Protected by <m>.
        std::map<unsigned, bool> channel_supports_wb;  // Set using Nageru.set_supports_wb(). Protected by <m>.
 
        std::map<unsigned, int> channel_signals;  // Set using Nageru.set_channel_signal(). Protected by <m>.
        std::map<unsigned, bool> channel_supports_wb;  // Set using Nageru.set_supports_wb(). Protected by <m>.
 
+       // Used to construct OpenGL contexts for VideoInputs. Needs to be available
+       // during the entire lifetime of Theme, since they may be created basically
+       // at any time.
+       const QSurface *surface;
+
        friend class LiveInputWrapper;
        friend class Scene;
        friend int ThemeMenu_set(lua_State *L);
        friend class LiveInputWrapper;
        friend class Scene;
        friend int ThemeMenu_set(lua_State *L);
@@ -251,6 +257,7 @@ private:
        friend int Nageru_set_num_channels(lua_State *L);
        friend int Nageru_set_channel_signal(lua_State *L);
        friend int Nageru_set_supports_wb(lua_State *L);
        friend int Nageru_set_num_channels(lua_State *L);
        friend int Nageru_set_channel_signal(lua_State *L);
        friend int Nageru_set_supports_wb(lua_State *L);
+       friend int VideoInput_new(lua_State* L);
 };
 
 // LiveInputWrapper is a facade on top of an YCbCrInput, exposed to
 };
 
 // LiveInputWrapper is a facade on top of an YCbCrInput, exposed to
index 745e6cab1dbea22678654fb7f1e2c5b00339197d..23d0f001bca520bd638c10e93ba40f058575d590 100644 (file)
@@ -65,11 +65,11 @@ TimecodeRenderer::TimecodeRenderer(movit::ResourcePool *resource_pool, unsigned
 TimecodeRenderer::~TimecodeRenderer()
 {
        resource_pool->release_2d_texture(tex);
 TimecodeRenderer::~TimecodeRenderer()
 {
        resource_pool->release_2d_texture(tex);
-        check_error();
+       check_error();
        resource_pool->release_glsl_program(program_num);
        resource_pool->release_glsl_program(program_num);
-        check_error();
+       check_error();
        glDeleteBuffers(1, &vbo);
        glDeleteBuffers(1, &vbo);
-        check_error();
+       check_error();
 }
 
 string TimecodeRenderer::get_timecode_text(double pts, unsigned frame_num)
 }
 
 string TimecodeRenderer::get_timecode_text(double pts, unsigned frame_num)
@@ -152,15 +152,15 @@ void TimecodeRenderer::render_buffer_to_fbo(GLuint fbo)
        check_error();
 
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, display_width, height, GL_RED, GL_UNSIGNED_BYTE, image->bits());
        check_error();
 
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, display_width, height, GL_RED, GL_UNSIGNED_BYTE, image->bits());
-        check_error();
+       check_error();
 
        glUseProgram(program_num);
        check_error();
        glUniform1i(texture_sampler_uniform, 0);
 
        glUseProgram(program_num);
        check_error();
        glUniform1i(texture_sampler_uniform, 0);
-        check_error();
+       check_error();
 
 
-        glBindBuffer(GL_ARRAY_BUFFER, vbo);
-        check_error();
+       glBindBuffer(GL_ARRAY_BUFFER, vbo);
+       check_error();
 
        for (GLint attr_index : { position_attribute_index, texcoord_attribute_index }) {
                if (attr_index == -1) continue;
 
        for (GLint attr_index : { position_attribute_index, texcoord_attribute_index }) {
                if (attr_index == -1) continue;
index b9138af5f7dedb738a0ade89ab2166badf3f6d26..c144952ae17b5946611f88db92dfaec1db2e6da3 100644 (file)
@@ -42,7 +42,7 @@ void v210Converter::precompile_shader(unsigned width)
 
        char buf[16];
        snprintf(buf, sizeof(buf), "%u", num_local_work_groups);
 
        char buf[16];
        snprintf(buf, sizeof(buf), "%u", num_local_work_groups);
-        string shader_src = R"(#version 150
+       string shader_src = R"(#version 150
 #extension GL_ARB_compute_shader : enable
 #extension GL_ARB_shader_image_load_store : enable
 layout(local_size_x = )" + string(buf) + R"() in;
 #extension GL_ARB_compute_shader : enable
 #extension GL_ARB_shader_image_load_store : enable
 layout(local_size_x = )" + string(buf) + R"() in;
@@ -144,9 +144,9 @@ void v210Converter::convert(GLuint tex_src, GLuint tex_dst, unsigned width, unsi
        check_error();
        glUniform1i(shader.outbuf_pos, 1);
        check_error();
        check_error();
        glUniform1i(shader.outbuf_pos, 1);
        check_error();
-        glBindImageTexture(0, tex_src, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGB10_A2);
+       glBindImageTexture(0, tex_src, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGB10_A2);
        check_error();
        check_error();
-        glBindImageTexture(1, tex_dst, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGB10_A2);
+       glBindImageTexture(1, tex_dst, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGB10_A2);
        check_error();
 
        // Actually run the shader.
        check_error();
 
        // Actually run the shader.
index 8cdf42beb57c35843bd615cc28114ad0772f031b..77ba357b0ad60469f61b9d5a85dfa58ca4abe010 100644 (file)
@@ -58,7 +58,7 @@ X264Dynamic load_x264_for_bit_depth(unsigned depth)
                        x264_suffix = string(ptr, (m->l_name + strlen(m->l_name)) - ptr);
                        break;
                }
                        x264_suffix = string(ptr, (m->l_name + strlen(m->l_name)) - ptr);
                        break;
                }
-        }
+       }
        dlclose(handle);
 
        if (x264_dir.empty()) {
        dlclose(handle);
 
        if (x264_dir.empty()) {