// Three-lobed Lanczos, the most common choice.
// Note that if you change this, the accuracy for LANCZOS_TABLE_SIZE
// needs to be recomputed.
-#define LANCZOS_RADIUS 3.0
+#define LANCZOS_RADIUS 3.0f
#include <epoxy/gl.h>
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <algorithm>
+#include <mutex>
#include <Eigen/Sparse>
#include <Eigen/SparseQR>
#include <Eigen/OrderingMethods>
namespace {
-template<class T>
-struct Tap {
- T weight;
- T pos;
-};
-
float sinc(float x)
{
if (fabs(x) < 1e-6) {
//
// Solve for e = 1e-6 yields a step size of 0.0027, which to cover the range
// 0..3 needs 1109 steps. We round up to the next power of two, just to be sure.
+//
+// init_lanczos_table() must have run before the first call to
+// lanczos_weight_cached; lanczos_table_init_done is the std::once_flag
+// guarding that initialization (see call_once at the use sites).
#define LANCZOS_TABLE_SIZE 2048
-bool lanczos_table_init_done = false;
+static once_flag lanczos_table_init_done;
float lanczos_table[LANCZOS_TABLE_SIZE + 2];
void init_lanczos_table()
for (unsigned i = 0; i < LANCZOS_TABLE_SIZE + 2; ++i) {
lanczos_table[i] = lanczos_weight(float(i) * (LANCZOS_RADIUS / LANCZOS_TABLE_SIZE));
}
- lanczos_table_init_done = true;
}
float lanczos_weight_cached(float x)
{
- if (!lanczos_table_init_done) {
- // Could in theory race between two threads if we are unlucky,
- // but that is harmless, since they'll write the same data.
- init_lanczos_table();
- }
x = fabs(x);
if (x > LANCZOS_RADIUS) {
return 0.0f;
}
float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
- int table_pos_int = int(table_pos); // Truncate towards zero.
+ unsigned table_pos_int = int(table_pos); // Truncate towards zero.
float table_pos_frac = table_pos - table_pos_int;
assert(table_pos < LANCZOS_TABLE_SIZE + 2);
return lanczos_table[table_pos_int] +
}
template<class DestFloat>
-unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved)
+unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved, float pos1_pos2_diff, float inv_pos1_pos2_diff)
{
// Cut off near-zero values at both sides.
unsigned num_samples_saved = 0;
for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
// Copy the sample directly; it will be overwritten later if we can combine.
- if (dst != NULL) {
+ if (dst != nullptr) {
dst[j].weight = convert_float<float, DestFloat>(src[i].weight);
dst[j].pos = convert_float<float, DestFloat>(src[i].pos);
}
DestFloat pos, total_weight;
float sum_sq_error;
- combine_two_samples(w1, w2, pos1, pos2, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
+ combine_two_samples(w1, w2, pos1, pos1_pos2_diff, inv_pos1_pos2_diff, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
// If the interpolation error is larger than that of about sqrt(2) of
// a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
}
// OK, we can combine this and the next sample.
- if (dst != NULL) {
+ if (dst != nullptr) {
dst[j].weight = total_weight;
dst[j].pos = pos;
}
void normalize_sum(Tap<T>* vals, unsigned num)
{
for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
- double sum = 0.0;
+ float sum = 0.0;
for (unsigned i = 0; i < num; ++i) {
- sum += to_fp64(vals[i].weight);
+ sum += to_fp32(vals[i].weight);
}
- double inv_sum = 1.0 / sum;
+ float inv_sum = 1.0 / sum;
for (unsigned i = 0; i < num; ++i) {
- vals[i].weight = from_fp64<T>(to_fp64(vals[i].weight) * inv_sum);
+ vals[i].weight = from_fp32<T>(to_fp32(vals[i].weight) * inv_sum);
}
}
}
//
// The greedy strategy for combining samples is optimal.
template<class DestFloat>
-unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, Tap<DestFloat> **bilinear_weights)
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
{
float num_subtexels = src_size / movit_texel_subpixel_precision;
float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
+ float pos1_pos2_diff = 1.0f / src_size;
+ float inv_pos1_pos2_diff = src_size;
unsigned max_samples_saved = UINT_MAX;
for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y) {
- unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, NULL, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved);
+ unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, nullptr, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved, pos1_pos2_diff, inv_pos1_pos2_diff);
max_samples_saved = min(max_samples_saved, num_samples_saved);
}
// Now that we know the right width, actually combine the samples.
unsigned src_bilinear_samples = src_samples - max_samples_saved;
- *bilinear_weights = new Tap<DestFloat>[dst_samples * src_bilinear_samples];
+ bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
for (unsigned y = 0; y < dst_samples; ++y) {
- Tap<DestFloat> *bilinear_weights_ptr = *bilinear_weights + y * src_bilinear_samples;
+ Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
unsigned num_samples_saved = combine_samples(
weights + y * src_samples,
bilinear_weights_ptr,
num_subtexels,
inv_num_subtexels,
src_samples,
- max_samples_saved);
+ max_samples_saved,
+ pos1_pos2_diff,
+ inv_pos1_pos2_diff);
assert(num_samples_saved == max_samples_saved);
normalize_sum(bilinear_weights_ptr, src_bilinear_samples);
}
// Find the effective range of the bilinear-optimized kernel.
// Due to rounding of the positions, this is not necessarily the same
// as the intended range (ie., the range of the original weights).
- int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
- int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
- lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
- upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5) + 1);
+ int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5f));
+ int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5f)) + 2;
+ lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5f));
+ upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5f) + 1);
float* effective_weights = new float[upper_pos - lower_pos];
for (int i = 0; i < upper_pos - lower_pos; ++i) {
// Now find the effective weights that result from this sampling.
for (unsigned i = 0; i < num_bilinear_weights; ++i) {
- const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+ const float pixel_pos = to_fp32(bilinear_weights[i].pos) * size - 0.5f;
const int x0 = int(floor(pixel_pos)) - lower_pos;
const int x1 = x0 + 1;
const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
assert(x0 < upper_pos - lower_pos);
assert(x1 < upper_pos - lower_pos);
- effective_weights[x0] += to_fp64(bilinear_weights[i].weight) * (1.0 - f);
- effective_weights[x1] += to_fp64(bilinear_weights[i].weight) * f;
+ effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0f - f);
+ effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
}
// Subtract the desired weights to get the error.
register_int("height", &output_height);
// The first blur pass will forward resolution information to us.
// NOTE(review): the removed code constructed vpass with a NULL parent,
// but this patch passes |this| to both passes — confirm the vpass parent
// change is intentional and not an accidental edit in the unique_ptr refactor.
- hpass = new SingleResamplePassEffect(this);
+ hpass_owner.reset(new SingleResamplePassEffect(this));
+ hpass = hpass_owner.get();
CHECK(hpass->set_int("direction", SingleResamplePassEffect::HORIZONTAL));
- vpass = new SingleResamplePassEffect(NULL);
+ vpass_owner.reset(new SingleResamplePassEffect(this));
+ vpass = vpass_owner.get();
CHECK(vpass->set_int("direction", SingleResamplePassEffect::VERTICAL));
update_size();
}
+// Out-of-line even though empty: presumably so the unique_ptr members
+// (hpass_owner/vpass_owner, see the reset() calls in the constructor)
+// are destroyed where SingleResamplePassEffect is a complete type —
+// TODO confirm against the header.
+ResampleEffect::~ResampleEffect()
+{
+}
+
void ResampleEffect::rewrite_graph(EffectChain *graph, Node *self)
{
- Node *hpass_node = graph->add_node(hpass);
- Node *vpass_node = graph->add_node(vpass);
+ Node *hpass_node = graph->add_node(hpass_owner.release());
+ Node *vpass_node = graph->add_node(vpass_owner.release());
graph->connect_nodes(hpass_node, vpass_node);
graph->replace_receiver(self, hpass_node);
graph->replace_sender(self, vpass_node);
SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
: parent(parent),
direction(HORIZONTAL),
- input_width(1280),
- input_height(720),
+ input_width(1280),
+ input_height(720),
offset(0.0),
zoom(1.0),
last_input_width(-1),
last_output_width(-1),
last_output_height(-1),
last_offset(0.0 / 0.0), // NaN.
- last_zoom(0.0 / 0.0), // NaN.
- last_texture_width(-1), last_texture_height(-1)
+ last_zoom(0.0 / 0.0) // NaN.
{
register_int("direction", (int *)&direction);
register_int("input_width", &input_width);
register_uniform_float("sample_x_offset", &uniform_sample_x_offset);
register_uniform_float("whole_pixel_offset", &uniform_whole_pixel_offset);
- glGenTextures(1, &texnum);
+ call_once(lanczos_table_init_done, init_lanczos_table);
}
SingleResamplePassEffect::~SingleResamplePassEffect()
{
- glDeleteTextures(1, &texnum);
}
string SingleResamplePassEffect::output_fragment_shader()
assert(false);
}
+ ScalingWeights weights = calculate_bilinear_scaling_weights(src_size, dst_size, zoom, offset);
+ src_bilinear_samples = weights.src_bilinear_samples;
+ num_loops = weights.num_loops;
+ slice_height = 1.0f / weights.num_loops;
+
+ // Encode as a two-component texture. Note the GL_REPEAT.
+ glActiveTexture(GL_TEXTURE0 + *sampler_num);
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, tex.get_texnum());
+ check_error();
+
+ GLenum type, internal_format;
+ void *pixels;
+ assert((weights.bilinear_weights_fp16 == nullptr) != (weights.bilinear_weights_fp32 == nullptr));
+ if (weights.bilinear_weights_fp32 != nullptr) {
+ type = GL_FLOAT;
+ internal_format = GL_RG32F;
+ pixels = weights.bilinear_weights_fp32.get();
+ } else {
+ type = GL_HALF_FLOAT;
+ internal_format = GL_RG16F;
+ pixels = weights.bilinear_weights_fp16.get();
+ }
+
+ tex.update(weights.src_bilinear_samples, weights.dst_samples, internal_format, GL_RG, type, pixels);
+}
+
+namespace {
+
+ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
+{
+ // Only needed if run from outside ResampleEffect.
+ call_once(lanczos_table_init_done, init_lanczos_table);
+
// For many resamplings (e.g. 640 -> 1280), we will end up with the same
// set of samples over and over again in a loop. Thus, we can compute only
// the first such loop, and then ask the card to repeat the texture for us.
// This is both easier on the texture cache and lowers our CPU cost for
// generating the kernel somewhat.
float scaling_factor;
+ int num_loops;
if (fabs(zoom - 1.0f) < 1e-6) {
num_loops = gcd(src_size, dst_size);
scaling_factor = float(dst_size) / float(src_size);
num_loops = 1;
scaling_factor = zoom * float(dst_size) / float(src_size);
}
- slice_height = 1.0f / num_loops;
unsigned dst_samples = dst_size / num_loops;
// Sample the kernel in the right place. A diagram with a triangular kernel
// to compute the destination pixel, and how many depend on the scaling factor.
// Thus, the kernel width will vary with how much we scale.
float radius_scaling_factor = min(scaling_factor, 1.0f);
- int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
- int src_samples = int_radius * 2 + 1;
- Tap<float> *weights = new Tap<float>[dst_samples * src_samples];
+ const int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
+ const int src_samples = int_radius * 2 + 1;
+ unique_ptr<Tap<float>[]> weights(new Tap<float>[dst_samples * src_samples]);
float subpixel_offset = offset - lrintf(offset); // The part not covered by whole_pixel_offset.
assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
+ float inv_scaling_factor = 1.0f / scaling_factor;
for (unsigned y = 0; y < dst_samples; ++y) {
// Find the point around which we want to sample the source image,
// compensating for differing pixel centers as the scale changes.
- float center_src_y = (y + 0.5f) / scaling_factor - 0.5f;
+ float center_src_y = (y + 0.5f) * inv_scaling_factor - 0.5f;
int base_src_y = lrintf(center_src_y);
// Now sample <int_radius> pixels on each side around that point.
+ float inv_src_size = 1.0 / float(src_size);
for (int i = 0; i < src_samples; ++i) {
int src_y = base_src_y + i - int_radius;
float weight = lanczos_weight_cached(radius_scaling_factor * (src_y - center_src_y - subpixel_offset));
weights[y * src_samples + i].weight = weight * radius_scaling_factor;
- weights[y * src_samples + i].pos = (src_y + 0.5) / float(src_size);
+ weights[y * src_samples + i].pos = (src_y + 0.5f) * inv_src_size;
}
}
+ ScalingWeights ret;
+ ret.src_bilinear_samples = src_samples;
+ ret.dst_samples = dst_samples;
+ ret.num_loops = num_loops;
+ ret.bilinear_weights_fp16 = nullptr;
+ ret.bilinear_weights_fp32 = move(weights);
+ return ret;
+}
+
+} // namespace
+
+ScalingWeights calculate_bilinear_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
+{
+ ScalingWeights ret = calculate_scaling_weights(src_size, dst_size, zoom, offset);
+ unique_ptr<Tap<float>[]> weights = move(ret.bilinear_weights_fp32);
+ const int src_samples = ret.src_bilinear_samples;
+
// Now make use of the bilinear filtering in the GPU to reduce the number of samples
// we need to make. Try fp16 first; if it's not accurate enough, we go to fp32.
// Our tolerance level for total error is a bit higher than the one for invididual
// samples, since one would assume overall errors in the shape don't matter as much.
const float max_error = 2.0f / (255.0f * 255.0f);
- Tap<fp16_int_t> *bilinear_weights_fp16;
- src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
- Tap<float> *bilinear_weights_fp32 = NULL;
- bool fallback_to_fp32 = false;
+ unique_ptr<Tap<fp16_int_t>[]> bilinear_weights_fp16;
+ int src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, ret.dst_samples, &bilinear_weights_fp16);
+ unique_ptr<Tap<float>[]> bilinear_weights_fp32 = nullptr;
double max_sum_sq_error_fp16 = 0.0;
- for (unsigned y = 0; y < dst_samples; ++y) {
+ for (unsigned y = 0; y < ret.dst_samples; ++y) {
double sum_sq_error_fp16 = compute_sum_sq_error(
- weights + y * src_samples, src_samples,
- bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
+ weights.get() + y * src_samples, src_samples,
+ bilinear_weights_fp16.get() + y * src_bilinear_samples, src_bilinear_samples,
src_size);
max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
if (max_sum_sq_error_fp16 > max_error) {
}
if (max_sum_sq_error_fp16 > max_error) {
- fallback_to_fp32 = true;
- src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
+ bilinear_weights_fp16.reset();
+ src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, ret.dst_samples, &bilinear_weights_fp32);
}
- // Encode as a two-component texture. Note the GL_REPEAT.
- glActiveTexture(GL_TEXTURE0 + *sampler_num);
- check_error();
- glBindTexture(GL_TEXTURE_2D, texnum);
- check_error();
- if (last_texture_width == -1) {
- // Need to set this state the first time.
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
- check_error();
- }
-
- GLenum type, internal_format;
- void *pixels;
- if (fallback_to_fp32) {
- type = GL_FLOAT;
- internal_format = GL_RG32F;
- pixels = bilinear_weights_fp32;
- } else {
- type = GL_HALF_FLOAT;
- internal_format = GL_RG16F;
- pixels = bilinear_weights_fp16;
- }
-
- if (int(src_bilinear_samples) == last_texture_width &&
- int(dst_samples) == last_texture_height &&
- internal_format == last_texture_internal_format) {
- // Texture dimensions and type are unchanged; it is more efficient
- // to just update it rather than making an entirely new texture.
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, src_bilinear_samples, dst_samples, GL_RG, type, pixels);
- } else {
- glTexImage2D(GL_TEXTURE_2D, 0, internal_format, src_bilinear_samples, dst_samples, 0, GL_RG, type, pixels);
- last_texture_width = src_bilinear_samples;
- last_texture_height = dst_samples;
- last_texture_internal_format = internal_format;
- }
- check_error();
-
- delete[] weights;
- delete[] bilinear_weights_fp16;
- delete[] bilinear_weights_fp32;
+ ret.src_bilinear_samples = src_bilinear_samples;
+ ret.bilinear_weights_fp16 = move(bilinear_weights_fp16);
+ ret.bilinear_weights_fp32 = move(bilinear_weights_fp32);
+ return ret;
}
void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
glActiveTexture(GL_TEXTURE0 + *sampler_num);
check_error();
- glBindTexture(GL_TEXTURE_2D, texnum);
+ glBindTexture(GL_TEXTURE_2D, tex.get_texnum());
check_error();
uniform_sample_tex = *sampler_num;
} else {
uniform_whole_pixel_offset = lrintf(offset) / float(input_width);
}
+}
- // We specifically do not want mipmaps on the input texture;
- // they break minification.
- Node *self = chain->find_node_for_effect(this);
- glActiveTexture(chain->get_input_sampler(self, 0));
+// Creates the support texture with NEAREST filtering and REPEAT wrapping —
+// the same state the removed inline texnum setup established once per
+// SingleResamplePassEffect.
+Support2DTexture::Support2DTexture()
+{
+ glGenTextures(1, &texnum);
check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ glBindTexture(GL_TEXTURE_2D, texnum);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ check_error();
+}
+
+// Releases the GL texture created in the constructor (RAII).
+Support2DTexture::~Support2DTexture()
+{
+ glDeleteTextures(1, &texnum);
+ check_error();
+}
+
+// Uploads <data> into the texture. Reallocates storage (glTexImage2D) only
+// when width/height/internal_format differ from the previous upload;
+// otherwise does a cheaper in-place glTexSubImage2D update.
+void Support2DTexture::update(GLint width, GLint height, GLenum internal_format, GLenum format, GLenum type, const GLvoid * data)
+{
+ glBindTexture(GL_TEXTURE_2D, texnum);
check_error();
+ if (width == last_texture_width &&
+ height == last_texture_height &&
+ internal_format == last_texture_internal_format) {
+ // Texture dimensions and type are unchanged; it is more efficient
+ // to just update it rather than making an entirely new texture.
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, data);
+ check_error();
+ } else {
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, type, data);
+ check_error();
+ // Remember the parameters so subsequent identical uploads take the
+ // glTexSubImage2D fast path.
+ last_texture_width = width;
+ last_texture_height = height;
+ last_texture_internal_format = internal_format;
+ }
}
} // namespace movit