Clang rightfully pointed out that VLAs are a non-standard extension.
We can do just fine with three small heap allocations per software-decoded
JPEG (the main path is VA-API anyway).
}
if (!error_mgr.run([&dinfo, &y_pix, &cb_pix, &cr_pix, pitch_y, pitch_chroma, v_mcu_size, mcu_height_blocks] {
- JSAMPROW yptr[v_mcu_size], cbptr[v_mcu_size], crptr[v_mcu_size];
- JSAMPARRAY data[3] = { yptr, cbptr, crptr };
+ unique_ptr<JSAMPROW[]> yptr(new JSAMPROW[v_mcu_size]);
+ unique_ptr<JSAMPROW[]> cbptr(new JSAMPROW[v_mcu_size]);
+ unique_ptr<JSAMPROW[]> crptr(new JSAMPROW[v_mcu_size]);
+ JSAMPARRAY data[3] = { yptr.get(), cbptr.get(), crptr.get() };
for (unsigned y = 0; y < mcu_height_blocks; ++y) {
// NOTE: The last elements of cbptr/crptr will be unused for vertically subsampled chroma.
for (unsigned yy = 0; yy < v_mcu_size; ++yy) {
signal_changed();
ui->grabbed_frame_label->installEventFilter(this);
- glGenBuffers(1, &pbo);
- glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, pbo);
- glBufferData(GL_PIXEL_PACK_BUFFER_ARB, global_flags.width * global_flags.height * 4, nullptr, GL_STREAM_READ);
+ glGenBuffers(1, &pbo);
+ glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, pbo);
+ glBufferData(GL_PIXEL_PACK_BUFFER_ARB, global_flags.width * global_flags.height * 4, nullptr, GL_STREAM_READ);
}
Analyzer::~Analyzer()
ui->blue_label->setText(u8"—");
ui->hex_label->setText(u8"#—");
}
- return false;
+ return false;
}
void Analyzer::grab_pixel(int x, int y)
global_headers = string(reinterpret_cast<const char *>(header->p_buffer), header->n_filled_len);
svt_av1_enc_stream_header_release(header); // Don't care about errors.
- }
+ }
}
void AV1Encoder::encoder_thread_func()
void callback(float level_lufs, float peak_db,
std::vector<AudioMixer::BusLevel> bus_levels,
- float global_level_lufs, float range_low_lufs, float range_high_lufs,
- float final_makeup_gain_db,
- float correlation)
+ float global_level_lufs, float range_low_lufs, float range_high_lufs,
+ float final_makeup_gain_db,
+ float correlation)
{
// Empty.
}
+++ /dev/null
-
-// Needs to be in its own file because Qt and libepoxy seemingly don't coexist well
-// within the same file.
-
-class QSurface;
-class QOpenGLContext;
-class QSurfaceFormat;
-class QGLWidget;
-
-extern bool using_egl;
-extern QGLWidget *global_share_widget;
-QSurface *create_surface(const QSurfaceFormat &format);
-QSurface *create_surface_with_same_format(const QSurface *surface);
-QOpenGLContext *create_context(const QSurface *surface);
-bool make_current(QOpenGLContext *context, QSurface *surface);
-void delete_context(QOpenGLContext *context);
CorrelationMeasurer::CorrelationMeasurer(unsigned sample_rate,
float lowpass_cutoff_hz,
- float falloff_seconds)
+ float falloff_seconds)
: w1(2.0 * M_PI * lowpass_cutoff_hz / sample_rate),
w2(1.0 / (falloff_seconds * sample_rate))
{
assert(stride == width * 2);
}
- current_video_frame = video_frame_allocator->create_frame(width, height, stride);
+ if (width * stride > FRAME_SIZE) {
+ // TODO: If we had an OpenGL context here, calling create_frame()
+ // would be completely fine.
+ fprintf(stderr, "Card %u: Captured frame %d x %d (stride %d) would be larger than supported frame size (%d > %d), skipping.\n",
+ card_index, width, height, stride, width * stride, FRAME_SIZE);
+ } else {
+ current_video_frame = video_frame_allocator->create_frame(width, height, stride);
+ }
if (current_video_frame.data != nullptr) {
const uint8_t *src;
video_frame->GetBytes((void **)&src);
// #define MAX_VIDEO_CARDS 16 // defined in shared_defs.h.
#define MAX_ALSA_CARDS 16
#define MAX_BUSES 256 // Audio buses.
-#define FRAME_SIZE (8 << 20) // 8 MB. (FIXME: Not enough for a 2160p frame!)
+
+// FRAME_SIZE is the default frame size, in bytes. FFmpeg inputs (video files and SRT streams)
+// can allocate larger frames as needed; USB and DeckLink outputs always use FRAME_SIZE.
+// We should eventually add support for at least DeckLink outputs, allowing us to capture
+// 2160p frames. Also, it would allow us to lower the default frame size to the maximum
+// bmusb supports (2 MB just about covers 1080i 4:2:2, then add some for 10-bit?) to waste
+// less memory.
+//
+// As a general sanity check, we also have a MAX_FRAME_SIZE that even dynamic allocation
+// will not go past.
+#define FRAME_SIZE (8 << 20) // 8 MB (just enough for 1080p RGBA).
+#define MAX_FRAME_SIZE (140 << 20) // 140 MB; enough for 8192*4320 RGBA and then some.
// For deinterlacing. See also comments on InputState.
#define FRAME_HISTORY_LENGTH 5
#include "shared/shared_defs.h"
#include <assert.h>
-#include <cerrno>
-#include <ctime>
+#include <errno.h>
+#include <epoxy/egl.h>
#include <limits>
#include <map>
#include <memory>
#include <movit/ycbcr.h>
#include <mutex>
#include <pthread.h>
+#include <time.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <sys/stat.h>
#include <thread>
+#include <QSurface>
extern "C" {
#include <libavcodec/avcodec.h>
#include <movit/colorspace_conversion_effect.h>
#include "bmusb/bmusb.h"
+#include "shared/context.h"
#include "shared/ffmpeg_raii.h"
#include "ffmpeg_util.h"
#include "flags.h"
} // namespace
-FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height)
- : filename(filename), width(width), height(height), video_timebase{1, 1}
+FFmpegCapture::FFmpegCapture(const string &filename, unsigned width, unsigned height, QSurface *surface)
+ : filename(filename), width(width), height(height), video_timebase{1, 1}, surface(surface)
{
description = "Video: " + filename;
}
#ifdef HAVE_SRT
-FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id)
+FFmpegCapture::FFmpegCapture(int srt_sock, const string &stream_id, QSurface *surface)
: srt_sock(srt_sock),
width(0), // Don't resize; SRT streams typically have stable resolution, and should behave much like regular cards in general.
height(0),
pixel_format(bmusb::PixelFormat_8BitYCbCrPlanar),
- video_timebase{1, 1}
+ video_timebase{1, 1},
+ surface(surface)
{
if (stream_id.empty()) {
description = "SRT stream";
srt_close(srt_sock);
}
#endif
+ delete surface;
}
void FFmpegCapture::configure_card()
snprintf(thread_name, sizeof(thread_name), "FFmpeg_C_%d", card_index);
pthread_setname_np(pthread_self(), thread_name);
+ // We need a context in case create_frame() needs to reallocate something.
+ // (If none is given, we are probably in Kaeru, which uses MallocFrameAllocator
+ // anyway, which doesn't reallocate currently and definitely doesn't need
+ // an active OpenGL context to do so.)
+ QOpenGLContext *context = nullptr;
+ if (surface != nullptr) {
+ context = create_context(this->surface);
+ eglBindAPI(EGL_OPENGL_API);
+ if (!make_current(context, this->surface)) {
+ printf("display=%p surface=%p context=%p curr=%p err=%d\n", eglGetCurrentDisplay(), this->surface, context, eglGetCurrentContext(),
+ eglGetError());
+ abort();
+ }
+ }
+
while (!producer_thread_should_quit.should_quit()) {
string filename_copy;
{
}
if (has_dequeue_callbacks) {
- dequeue_cleanup_callback();
+ dequeue_cleanup_callback();
has_dequeue_callbacks = false;
- }
+ }
+
+ delete_context(context);
}
void FFmpegCapture::send_disconnected_frame()
int out_samples = swr_convert(resampler, &data, num_samples_room,
const_cast<const uint8_t **>(audio_avframe->data), audio_avframe->nb_samples);
if (out_samples < 0) {
- fprintf(stderr, "Audio conversion failed.\n");
- abort();
- }
+ fprintf(stderr, "Audio conversion failed.\n");
+ abort();
+ }
audio_frame->len += out_samples * bytes_per_sample;
}
{
*error = false;
- UniqueFrame video_frame(video_frame_allocator->alloc_frame());
+ UniqueFrame video_frame(video_frame_allocator->create_frame(frame->width, frame->height, frame->width));
if (video_frame->data == nullptr) {
return video_frame;
}
// FIXME: Currently, if the video is too high-res for one of the allocated
// frames, we simply refuse to scale it here to avoid crashes. It would be better
// if we could somehow signal getting larger frames, especially as 4K is a thing now.
- if (video_frame->len > FRAME_SIZE) {
- fprintf(stderr, "%s: Decoded frame would be larger than supported FRAME_SIZE (%zu > %u), not decoding.\n", pathname.c_str(), video_frame->len, FRAME_SIZE);
+ if (video_frame->len > video_frame->size) {
+ fprintf(stderr, "%s: Decoded frame would be larger than supported frame size (%zu > %zu), not decoding.\n", pathname.c_str(), video_frame->len, video_frame->size);
*error = true;
return video_frame;
}
struct AVFrame;
struct AVRational;
struct AVPacket;
+class QSurface;
class FFmpegCapture : public bmusb::CaptureInterface
{
public:
- FFmpegCapture(const std::string &filename, unsigned width, unsigned height);
+ FFmpegCapture(const std::string &filename, unsigned width, unsigned height, QSurface *surface);
#ifdef HAVE_SRT
// Takes ownership of the SRT client socket.
- FFmpegCapture(int srt_sock, const std::string &stream_id);
+ FFmpegCapture(int srt_sock, const std::string &stream_id, QSurface *surface);
#endif
~FFmpegCapture();
// -1 is strictly speaking outside the range of the enum, but hopefully, it will be alright.
AVColorSpace last_colorspace = static_cast<AVColorSpace>(-1);
AVChromaLocation last_chroma_location = static_cast<AVChromaLocation>(-1);
+ QSurface *const surface;
};
#endif // !defined(_FFMPEG_CAPTURE_H)
#else // !defined(__SSE__)
union uint_float {
- float f;
- unsigned int i;
+ float f;
+ unsigned int i;
};
#define early_undenormalise(sample) { \
uint_float uf; \
void InputMappingDialog::updown_clicked(int direction)
{
assert(ui->table->selectedRanges().size() == 1);
- const QTableWidgetSelectionRange &range = ui->table->selectedRanges()[0];
+ QTableWidgetSelectionRange range = ui->table->selectedRanges()[0];
int a_row = range.bottomRow();
int b_row = range.bottomRow() + direction;
}
global_x264_encoder = x264_encoder.get();
- FFmpegCapture video(argv[optind], global_flags.width, global_flags.height);
+ FFmpegCapture video(argv[optind], global_flags.width, global_flags.height, /*surface=*/nullptr);
video.set_pixel_format(FFmpegCapture::PixelFormat_NV12);
if (global_flags.transcode_video) {
video.set_frame_callback(bind(video_frame_callback, &video, x264_encoder.get(), audio_encoder.get(), _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11));
}
num_controller_banks = min(max(mapping_proto->num_controller_banks(), 1), 5);
- current_controller_bank = 0;
+ current_controller_bank = 0;
receiver->clear_all_highlights();
update_highlights();
MIDIMappingDialog::MIDIMappingDialog(MIDIMapper *mapper)
: ui(new Ui::MIDIMappingDialog),
- mapper(mapper)
+ mapper(mapper)
{
ui->setupUi(this);
}
// Must be instantiated after VideoEncoder has initialized global_flags.use_zerocopy.
- theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get()));
+ theme.reset(new Theme(global_flags.theme_filename, global_flags.theme_dirs, resource_pool.get(), create_surface(format)));
// Must be instantiated after the theme, as the theme decides the number of FFmpeg inputs.
std::vector<FFmpegCapture *> video_inputs = theme->get_video_inputs();
card->frame_allocator->reconfigure(pixel_format, FRAME_SIZE, global_flags.width, global_flags.height, card_index, mjpeg_encoder.get());
}
card->capture->set_video_frame_allocator(card->frame_allocator.get());
- if (card->surface == nullptr) {
- card->surface = create_surface_with_same_format(mixer_surface);
- }
while (!card->new_frames.empty()) card->new_frames.pop_front();
card->last_timecode = -1;
card->capture->set_pixel_format(pixel_format);
void Mixer::bm_frame(unsigned card_index, uint16_t timecode,
FrameAllocator::Frame video_frame, size_t video_offset, VideoFormat video_format,
- FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
+ FrameAllocator::Frame audio_frame, size_t audio_offset, AudioFormat audio_format)
{
DeviceSpec device{InputSourceType::CAPTURE_CARD, card_index};
CaptureCard *card = &cards[card_index];
fprintf(stderr, "New SRT stream connected (%s), choosing slot %d.\n", stream_id.c_str(), free_card_index);
}
CaptureCard *card = &cards[free_card_index];
- FFmpegCapture *capture = new FFmpegCapture(sock, stream_id);
+ FFmpegCapture *capture = new FFmpegCapture(sock, stream_id, create_surface_with_same_format(mixer_surface));
capture->set_card_index(free_card_index);
configure_card(free_card_index, capture, CardType::FFMPEG_INPUT, /*output=*/nullptr, /*is_srt_card=*/true);
card->srt_metrics.update_srt_stats(sock); // Initial zero stats.
std::unique_ptr<PBOFrameAllocator> frame_allocator;
- // Stuff for the OpenGL context (for texture uploading).
- QSurface *surface = nullptr;
-
struct NewFrame {
RefCountedFrame frame;
int64_t length; // In TIMEBASE units.
#include <va/va.h>
#include "mjpeg_encoder.h"
+#include "defs.h"
#include "shared/va_resource_pool.h"
#include "v210_converter.h"
#include "shared/va_display.h"
} // namespace
PBOFrameAllocator::PBOFrameAllocator(bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, unsigned card_index, MJPEGEncoder *mjpeg_encoder, size_t num_queued_frames, GLenum buffer, GLenum permissions, GLenum map_bits)
- : card_index(card_index),
+ : card_index(card_index),
mjpeg_encoder(mjpeg_encoder),
pixel_format(pixel_format),
buffer(buffer),
{
userdata.reset(new Userdata[num_queued_frames]);
for (size_t i = 0; i < num_queued_frames; ++i) {
- init_frame(i, frame_size, width, height, permissions, map_bits, generation);
+ Frame frame;
+ init_frame(frame, &userdata[i], this, pixel_format, frame_size, width, height, permissions, map_bits, buffer, generation);
+ freelist.push(frame);
}
glBindBuffer(buffer, 0);
check_error();
check_error();
}
-void PBOFrameAllocator::init_frame(size_t frame_idx, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, int generation)
+void PBOFrameAllocator::init_frame(Frame &frame, Userdata *ud, PBOFrameAllocator *owner, bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, GLenum buffer, int generation)
{
GLuint pbo;
glGenBuffers(1, &pbo);
glBufferStorage(buffer, frame_size, nullptr, permissions | GL_MAP_PERSISTENT_BIT);
check_error();
- Frame frame;
frame.data = (uint8_t *)glMapBufferRange(buffer, 0, frame_size, permissions | map_bits | GL_MAP_PERSISTENT_BIT);
frame.data2 = frame.data + frame_size / 2;
check_error();
frame.size = frame_size;
- Userdata *ud = &userdata[frame_idx];
frame.userdata = ud;
ud->generation = generation;
ud->pbo = pbo;
ud->pixel_format = pixel_format;
ud->data_copy_malloc = new uint8_t[frame_size];
- frame.owner = this;
+ frame.owner = owner;
// For 8-bit non-planar Y'CbCr, we ask the driver to split Y' and Cb/Cr
// into separate textures. For 10-bit, the input format (v210)
assert(false);
}
}
-
- freelist.push(frame);
}
PBOFrameAllocator::~PBOFrameAllocator()
bmusb::FrameAllocator::Frame PBOFrameAllocator::alloc_frame()
{
- Frame vf;
+ Frame vf;
lock_guard<mutex> lock(freelist_mutex); // Meh.
if (freelist.empty()) {
bmusb::FrameAllocator::Frame PBOFrameAllocator::create_frame(size_t width, size_t height, size_t stride)
{
- Frame vf;
+ Frame vf;
+
+ size_t desired_frame_bytes = width * stride;
+ if (stride > 8192 * 4 || height > 8192 || desired_frame_bytes > MAX_FRAME_SIZE) {
+ return vf;
+ }
{
lock_guard<mutex> lock(freelist_mutex);
freelist.pop();
}
}
- vf.len = 0;
- vf.overflow = 0;
Userdata *userdata = (Userdata *)vf.userdata;
+ assert(generation == userdata->generation);
+ if (vf.size < desired_frame_bytes || (vf.size > FRAME_SIZE && vf.size > desired_frame_bytes * 2)) {
+ // Frame is either too small or way too large, so reallocate it.
+ // Note that width and height now automatically become the right size
+ // (the one we just asked for, instead of the default for the allocator,
+ // which is generally the global resolution); it doesn't matter
+ // for correctness, since we'll recreate the texture on upload if needed,
+ // but it is nice to save that step.
+ destroy_frame(&vf);
+ init_frame(vf, userdata, this, pixel_format, std::max<size_t>(desired_frame_bytes, FRAME_SIZE), width, height, permissions, map_bits, buffer, generation);
+ };
+
+ vf.len = 0;
+ vf.overflow = 0;
if (mjpeg_encoder != nullptr &&
mjpeg_encoder->should_encode_mjpeg_for_card(card_index)) {
userdata.reset(new Userdata[num_queued_frames]);
for (size_t i = 0; i < num_queued_frames; ++i) {
- init_frame(i, frame_size, width, height, permissions, map_bits, generation);
+ Frame frame;
+ init_frame(frame, &userdata[i], this, pixel_format, frame_size, width, height, permissions, map_bits, buffer, generation);
+ freelist.push(frame);
}
// There may still be frames out with the old configuration
};
private:
- void init_frame(size_t frame_idx, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, int generation);
+ static void init_frame(Frame &frame, Userdata *ud, PBOFrameAllocator *owner, bmusb::PixelFormat pixel_format, size_t frame_size, GLuint width, GLuint height, GLenum permissions, GLenum map_bits, GLenum buffer, int generation);
void destroy_frame(Frame *frame);
unsigned card_index;
// Prime the resampler so there's no more delay.
vresampler.inp_count = vresampler.inpsize() / 2 - 1;
- vresampler.out_count = 1048576;
- vresampler.process ();
+ vresampler.out_count = 1048576;
+ vresampler.process();
}
void ResamplingQueue::add_input_samples(steady_clock::time_point ts, const float *samples, ssize_t num_samples, ResamplingQueue::RateAdjustmentPolicy rate_adjustment_policy)
#include "mainwindow.h"
#include "pbo_frame_allocator.h"
#include "scene.h"
+#include "shared/context.h"
class Mixer;
// An effect that does nothing.
class IdentityEffect : public Effect {
public:
- IdentityEffect() {}
- string effect_type_id() const override { return "IdentityEffect"; }
- string output_fragment_shader() override { return read_file("identity.frag"); }
+ IdentityEffect() {}
+ string effect_type_id() const override { return "IdentityEffect"; }
+ string output_fragment_shader() override { return read_file("identity.frag"); }
};
Effect *instantiate_effect(EffectChain *chain, EffectType effect_type)
return wrap_lua_object_nonowned<ImageInput>(L, "ImageInput", filename);
}
+} // namespace
+
+// Must be non-namespaced due to friend declaration.
int VideoInput_new(lua_State* L)
{
assert(lua_gettop(L) == 2);
print_warning(L, "Invalid enum %d used for video format, choosing Y'CbCr.\n", pixel_format);
pixel_format = bmusb::PixelFormat_8BitYCbCrPlanar;
}
- int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height);
+ Theme *theme = get_theme_updata(L);
+ int ret = wrap_lua_object_nonowned<FFmpegCapture>(L, "VideoInput", filename, global_flags.width, global_flags.height, create_surface_with_same_format(theme->surface));
if (ret == 1) {
FFmpegCapture **capture = (FFmpegCapture **)lua_touserdata(L, -1);
(*capture)->set_pixel_format(bmusb::PixelFormat(pixel_format));
- Theme *theme = get_theme_updata(L);
theme->register_video_input(*capture);
}
return ret;
}
+namespace {
+
int VideoInput_rewind(lua_State* L)
{
assert(lua_gettop(L) == 1);
return 0;
}
-Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool)
- : resource_pool(resource_pool), signal_to_card_mapping(global_flags.default_stream_mapping)
+Theme::Theme(const string &filename, const vector<string> &search_dirs, ResourcePool *resource_pool, QSurface *surface)
+ : resource_pool(resource_pool), signal_to_card_mapping(global_flags.default_stream_mapping), surface(surface)
{
// Defaults.
channel_names[0] = "Live";
channel_names[1] = "Preview";
L = luaL_newstate();
- luaL_openlibs(L);
+ luaL_openlibs(L);
// Search through all directories until we find a file that will load
// (as in, does not return LUA_ERRFILE); then run it. We store load errors
{
theme_menu.reset();
lua_close(L);
+ // Leak the surface.
}
void Theme::register_globals()
class CEFCapture;
class FFmpegCapture;
class LiveInputWrapper;
+class QSurface;
struct InputState;
namespace movit {
class Theme {
public:
- Theme(const std::string &filename, const std::vector<std::string> &search_dirs, movit::ResourcePool *resource_pool);
+ Theme(const std::string &filename, const std::vector<std::string> &search_dirs, movit::ResourcePool *resource_pool, QSurface *surface);
~Theme();
struct Chain {
std::map<unsigned, int> channel_signals; // Set using Nageru.set_channel_signal(). Protected by <m>.
std::map<unsigned, bool> channel_supports_wb; // Set using Nageru.set_supports_wb(). Protected by <m>.
+ // Used to construct OpenGL contexts for VideoInputs. Needs to be available
+ // during the entire lifetime of Theme, since they may be created basically
+ // at any time.
+ const QSurface *surface;
+
friend class LiveInputWrapper;
friend class Scene;
friend int ThemeMenu_set(lua_State *L);
friend int Nageru_set_num_channels(lua_State *L);
friend int Nageru_set_channel_signal(lua_State *L);
friend int Nageru_set_supports_wb(lua_State *L);
+ friend int VideoInput_new(lua_State* L);
};
// LiveInputWrapper is a facade on top of an YCbCrInput, exposed to
TimecodeRenderer::~TimecodeRenderer()
{
resource_pool->release_2d_texture(tex);
- check_error();
+ check_error();
resource_pool->release_glsl_program(program_num);
- check_error();
+ check_error();
glDeleteBuffers(1, &vbo);
- check_error();
+ check_error();
}
string TimecodeRenderer::get_timecode_text(double pts, unsigned frame_num)
check_error();
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, display_width, height, GL_RED, GL_UNSIGNED_BYTE, image->bits());
- check_error();
+ check_error();
glUseProgram(program_num);
check_error();
glUniform1i(texture_sampler_uniform, 0);
- check_error();
+ check_error();
- glBindBuffer(GL_ARRAY_BUFFER, vbo);
- check_error();
+ glBindBuffer(GL_ARRAY_BUFFER, vbo);
+ check_error();
for (GLint attr_index : { position_attribute_index, texcoord_attribute_index }) {
if (attr_index == -1) continue;
char buf[16];
snprintf(buf, sizeof(buf), "%u", num_local_work_groups);
- string shader_src = R"(#version 150
+ string shader_src = R"(#version 150
#extension GL_ARB_compute_shader : enable
#extension GL_ARB_shader_image_load_store : enable
layout(local_size_x = )" + string(buf) + R"() in;
check_error();
glUniform1i(shader.outbuf_pos, 1);
check_error();
- glBindImageTexture(0, tex_src, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGB10_A2);
+ glBindImageTexture(0, tex_src, 0, GL_FALSE, 0, GL_READ_ONLY, GL_RGB10_A2);
check_error();
- glBindImageTexture(1, tex_dst, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGB10_A2);
+ glBindImageTexture(1, tex_dst, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGB10_A2);
check_error();
// Actually run the shader.
x264_suffix = string(ptr, (m->l_name + strlen(m->l_name)) - ptr);
break;
}
- }
+ }
dlclose(handle);
if (x264_dir.empty()) {