// Three-lobed Lanczos, the most common choice.
-#define LANCZOS_RADIUS 3.0
+// Note that if you change this, the accuracy for LANCZOS_TABLE_SIZE
+// needs to be recomputed.
+#define LANCZOS_RADIUS 3.0f
#include <epoxy/gl.h>
#include <assert.h>
namespace {
-template<class T>
-struct Tap {
- T weight;
- T pos;
-};
-
float sinc(float x)
{
if (fabs(x) < 1e-6) {
// For such a small x, sin(x)/x is 1 to within float precision.
return 1.0f;
} else {
return sin(x) / x;
}
}
-float lanczos_weight(float x, float a)
+float lanczos_weight(float x)
{
- if (fabs(x) > a) {
+ if (fabs(x) > LANCZOS_RADIUS) {
return 0.0f;
} else {
- return sinc(M_PI * x) * sinc(M_PI * x / a);
+ return sinc(M_PI * x) * sinc((M_PI / LANCZOS_RADIUS) * x);
}
}
+// The weight function can be expensive to compute over and over again
+// (which will happen during e.g. a zoom), but it is also easy to interpolate
+// linearly. We compute the right half of the function (in the range of
+// 0..LANCZOS_RADIUS), with two guard elements for easier interpolation, and
+// linearly interpolate to get our function.
+//
+// We want to scale the table so that the maximum error is always smaller
+// than 1e-6. As per http://www-solar.mcs.st-andrews.ac.uk/~clare/Lectures/num-analysis/Numan_chap3.pdf,
+// the error for interpolating a function linearly between points [a,b] is
+//
+// e = 1/2 (x-a)(x-b) f''(u_x)
+//
+// for some point u_x in [a,b] (where f(x) is our Lanczos function; we're
+// assuming LANCZOS_RADIUS=3 from here on). Obviously this is bounded by the
+// maximum of |f''(x)| over the entire range. That maximum is at x=0: around
+// zero, f(x) = 1 - (5 pi^2 / 27) x^2 + O(x^4), so |f''(0)| = 10 pi^2 / 27
+// = 3.6554, and |f''| is smaller everywhere else. So if the steps between
+// consecutive values are called d, we get
+//
+//   |e| <= 1/2 (d/2)^2 3.6554
+//   |e| <= 0.4570 d^2
+//
+// Solving for |e| = 1e-6 yields a step size of about 0.00148, which to cover
+// the range 0..3 needs 2028 steps. We round up to the next power of two,
+// just to be sure.
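+// (Check: LANCZOS_TABLE_SIZE = 2048 gives d = 3/2048 ~ 0.00146, so
+// |e| <= 0.4570 d^2 ~ 9.8e-7, just below the 1e-6 target.)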
+//
+// You need to call lanczos_table_init_done before the first call to
+// lanczos_weight_cached.
+#define LANCZOS_TABLE_SIZE 2048
+bool lanczos_table_init_done = false;
+float lanczos_table[LANCZOS_TABLE_SIZE + 2];
+
+void init_lanczos_table()
+{
+ for (unsigned i = 0; i < LANCZOS_TABLE_SIZE + 2; ++i) {
+ lanczos_table[i] = lanczos_weight(float(i) * (LANCZOS_RADIUS / LANCZOS_TABLE_SIZE));
+ }
+ lanczos_table_init_done = true;
+}
+
+float lanczos_weight_cached(float x)
+{
+ x = fabs(x);
+ if (x > LANCZOS_RADIUS) {
+ return 0.0f;
+ }
+ float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
+ unsigned table_pos_int = int(table_pos); // Truncate towards zero.
+ float table_pos_frac = table_pos - table_pos_int;
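+ // At x == LANCZOS_RADIUS, table_pos equals LANCZOS_TABLE_SIZE exactly
+ // (and float rounding can push it slightly above), so the lookup at
+ // table_pos_int + 1 below can reach element LANCZOS_TABLE_SIZE + 1;
+ // this is what the two guard elements are for.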
+ assert(table_pos < LANCZOS_TABLE_SIZE + 2);
+ return lanczos_table[table_pos_int] +
+ table_pos_frac * (lanczos_table[table_pos_int + 1] - lanczos_table[table_pos_int]);
+}
+
// Euclid's algorithm, from Wikipedia.
unsigned gcd(unsigned a, unsigned b)
{
while (b != 0) {
unsigned t = b;
b = a % b;
a = t;
}
return a;
}
template<class DestFloat>
-unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, unsigned src_size, unsigned num_src_samples, unsigned max_samples_saved)
+unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved, float pos1_pos2_diff, float inv_pos1_pos2_diff)
{
+ // Cut off near-zero values at both sides.
unsigned num_samples_saved = 0;
+ while (num_samples_saved < max_samples_saved &&
+ num_src_samples > 0 &&
+ fabs(src[0].weight) < 1e-6) {
+ ++src;
+ --num_src_samples;
+ ++num_samples_saved;
+ }
+ while (num_samples_saved < max_samples_saved &&
+ num_src_samples > 0 &&
+ fabs(src[num_src_samples - 1].weight) < 1e-6) {
+ --num_src_samples;
+ ++num_samples_saved;
+ }
+
for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
// Copy the sample directly; it will be overwritten later if we can combine.
if (dst != NULL) {
float pos2 = src[i + 1].pos;
assert(pos2 > pos1);
- fp16_int_t pos, total_weight;
+ DestFloat pos, total_weight;
float sum_sq_error;
- combine_two_samples(w1, w2, pos1, pos2, src_size, &pos, &total_weight, &sum_sq_error);
+ combine_two_samples(w1, w2, pos1, pos1_pos2_diff, inv_pos1_pos2_diff, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
// If the interpolation error is larger than that of about sqrt(2) of
// a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
void normalize_sum(Tap<T>* vals, unsigned num)
{
for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
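+ // Two passes help when T is a low-precision type; the second pass
+ // renormalizes the small error left by quantizing to T in the first.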
- double sum = 0.0;
+ float sum = 0.0f;
for (unsigned i = 0; i < num; ++i) {
- sum += to_fp64(vals[i].weight);
+ sum += to_fp32(vals[i].weight);
}
+ float inv_sum = 1.0f / sum;
for (unsigned i = 0; i < num; ++i) {
- vals[i].weight = from_fp64<T>(to_fp64(vals[i].weight) / sum);
+ vals[i].weight = from_fp32<T>(to_fp32(vals[i].weight) * inv_sum);
}
}
}
//
// The greedy strategy for combining samples is optimal.
template<class DestFloat>
-unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, Tap<DestFloat> **bilinear_weights)
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
{
- int src_bilinear_samples = 0;
- for (unsigned y = 0; y < dst_samples; ++y) {
- unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, NULL, src_size, src_samples, UINT_MAX);
- src_bilinear_samples = max<int>(src_bilinear_samples, src_samples - num_samples_saved);
+ float num_subtexels = src_size / movit_texel_subpixel_precision;
+ float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
+ float pos1_pos2_diff = 1.0f / src_size;
+ float inv_pos1_pos2_diff = src_size;
+
+ unsigned max_samples_saved = UINT_MAX;
+ for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y) {
+ unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, NULL, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved, pos1_pos2_diff, inv_pos1_pos2_diff);
+ max_samples_saved = min(max_samples_saved, num_samples_saved);
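+ // Ratchet down to the minimum achievable across all rows, so that
+ // every row can be stored with the same width.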
}
// Now that we know the right width, actually combine the samples.
- *bilinear_weights = new Tap<DestFloat>[dst_samples * src_bilinear_samples];
+ unsigned src_bilinear_samples = src_samples - max_samples_saved;
+ bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
for (unsigned y = 0; y < dst_samples; ++y) {
- Tap<DestFloat> *bilinear_weights_ptr = *bilinear_weights + y * src_bilinear_samples;
+ Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
unsigned num_samples_saved = combine_samples(
weights + y * src_samples,
bilinear_weights_ptr,
- src_size,
+ num_subtexels,
+ inv_num_subtexels,
src_samples,
- src_samples - src_bilinear_samples);
- assert(int(src_samples) - int(num_samples_saved) == src_bilinear_samples);
+ max_samples_saved,
+ pos1_pos2_diff,
+ inv_pos1_pos2_diff);
+ assert(num_samples_saved == max_samples_saved);
normalize_sum(bilinear_weights_ptr, src_bilinear_samples);
}
return src_bilinear_samples;
// Find the effective range of the bilinear-optimized kernel.
// Due to rounding of the positions, this is not necessarily the same
// as the intended range (ie., the range of the original weights).
- int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
- int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
- lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
- upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5));
+ int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5f));
+ int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5f)) + 2;
+ lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5f));
+ upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5f) + 1);
float* effective_weights = new float[upper_pos - lower_pos];
for (int i = 0; i < upper_pos - lower_pos; ++i) {
effective_weights[i] = 0.0f;
}
// Now find the effective weights that result from this sampling.
for (unsigned i = 0; i < num_bilinear_weights; ++i) {
- const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+ const float pixel_pos = to_fp32(bilinear_weights[i].pos) * size - 0.5f;
const int x0 = int(floor(pixel_pos)) - lower_pos;
const int x1 = x0 + 1;
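+ // f is pixel_pos's fractional part, quantized to the subtexel
+ // precision the GPU's bilinear filtering actually uses.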
const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
assert(x0 < upper_pos - lower_pos);
assert(x1 < upper_pos - lower_pos);
- effective_weights[x0] += to_fp64(bilinear_weights[i].weight) * (1.0 - f);
- effective_weights[x1] += to_fp64(bilinear_weights[i].weight) * f;
+ effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0f - f);
+ effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
}
// Subtract the desired weights to get the error.
return sum_sq_error;
}
-// Given a predefined, fixed set of bilinear weight positions, try to optimize
-// their weights through some linear algebra. This can do a better job than
-// the weight calculation in combine_samples() because it can look at the entire
-// picture (an effective weight can sometimes be affected by multiple samples).
-// It will also optimize weights for non-combined samples, which is useful when
-// a sample happens in-between texels for numerical reasons.
-//
-// The math goes as follows: The desired result is a weighted sum, where the
-// weights are the coefficients in <weights>:
-//
-// y = sum(c_j x_j, j)
-//
-// We try to approximate this by a different set of coefficients, which have
-// weights d_i and are placed at some fraction to the right of a source texel x_j.
-// This means it will influence two texels (x_j and x_{j+1}); generalizing this,
-// let us define that w_ij means the amount texel <j> influences bilinear weight
-// <i> (keeping in mind that w_ij = 0 for all but at most two different j).
-// This means the actually computed result is:
-//
-// y' = sum(d_i w_ij x_j, j)
-//
-// We assume w_ij fixed and wish to find {d_i} so that y' gets as close to y
-// as possible. Specifically, let us consider the sum of squred errors of the
-// coefficients:
-//
-// ε² = sum((sum( d_i w_ij, i ) - c_j)², j)
-//
-// The standard trick, which also applies just fine here, is to differentiate
-// the error with respect to each variable we wish to optimize, and set each
-// such expression to zero. Solving this equation set (which we can do efficiently
-// by letting Eigen invert a sparse matrix for us) yields the minimum possible
-// error. To see the form each such equation takes, pick any value k and
-// differentiate the expression by d_k:
-//
-// ∂(ε²)/∂(d_k) = sum(2(sum( d_i w_ij, i ) - c_j) w_kj, j)
-//
-// Setting this expression equal to zero, dropping the irrelevant factor 2 and
-// rearranging yields:
-//
-// sum(w_kj sum( d_i w_ij, i ), j) = sum(w_kj c_j, j)
-//
-// where again, we remember where the sums over j are over at most two elements,
-// since w_ij is nonzero for at most two values of j.
-template<class T>
-void optimize_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
- Tap<T>* bilinear_weights, unsigned num_bilinear_weights,
- unsigned size)
-{
- // Find the range of the desired weights.
- int c_lower_pos = lrintf(weights[0].pos * size - 0.5);
- int c_upper_pos = lrintf(weights[num_weights - 1].pos * size - 0.5) + 1;
-
- SparseMatrix<float> A(num_bilinear_weights, num_bilinear_weights);
- SparseVector<float> b(num_bilinear_weights);
-
- // Convert each bilinear weight to the (x, frac) form for less junk in the code below.
- int* pos = new int[num_bilinear_weights];
- float* fracs = new float[num_bilinear_weights];
- for (unsigned i = 0; i < num_bilinear_weights; ++i) {
- const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
- const float f = pixel_pos - floor(pixel_pos);
- pos[i] = int(floor(pixel_pos));
- fracs[i] = lrintf(f / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
- }
-
- // The index ordering is a bit unusual to fit better with the
- // notation in the derivation above.
- for (unsigned k = 0; k < num_bilinear_weights; ++k) {
- for (int j = pos[k]; j <= pos[k] + 1; ++j) {
- const float f_kj = (j == pos[k]) ? (1.0f - fracs[k]) : fracs[k];
- for (unsigned i = 0; i < num_bilinear_weights; ++i) {
- float f_ij;
- if (j == pos[i]) {
- f_ij = 1.0f - fracs[i];
- } else if (j == pos[i] + 1) {
- f_ij = fracs[i];
- } else {
- // f_ij = 0
- continue;
- }
- A.coeffRef(i, k) += f_kj * f_ij;
- }
- float c_j;
- if (j >= c_lower_pos && j < c_upper_pos) {
- c_j = weights[j - c_lower_pos].weight;
- } else {
- c_j = 0.0f;
- }
- b.coeffRef(k) += f_kj * c_j;
- }
- }
- delete[] pos;
- delete[] fracs;
-
- A.makeCompressed();
- SparseQR<SparseMatrix<float>, COLAMDOrdering<int> > qr(A);
- assert(qr.info() == Success);
- SparseMatrix<float> new_weights = qr.solve(b);
- assert(qr.info() == Success);
-
- for (unsigned i = 0; i < num_bilinear_weights; ++i) {
- bilinear_weights[i].weight = from_fp64<T>(new_weights.coeff(i, 0));
- }
- normalize_sum(bilinear_weights, num_bilinear_weights);
-}
-
} // namespace
ResampleEffect::ResampleEffect()
last_output_width(-1),
last_output_height(-1),
last_offset(0.0 / 0.0), // NaN.
- last_zoom(0.0 / 0.0) // NaN.
+ last_zoom(0.0 / 0.0), // NaN.
+ last_texture_width(-1), last_texture_height(-1)
{
register_int("direction", (int *)&direction);
register_int("input_width", &input_width);
register_int("output_height", &output_height);
register_float("offset", &offset);
register_float("zoom", &zoom);
+ register_uniform_sampler2d("sample_tex", &uniform_sample_tex);
+ register_uniform_int("num_samples", &uniform_num_samples);
+ register_uniform_float("num_loops", &uniform_num_loops);
+ register_uniform_float("slice_height", &uniform_slice_height);
+ register_uniform_float("sample_x_scale", &uniform_sample_x_scale);
+ register_uniform_float("sample_x_offset", &uniform_sample_x_offset);
+ register_uniform_float("whole_pixel_offset", &uniform_whole_pixel_offset);
glGenTextures(1, &texnum);
+
+ if (!lanczos_table_init_done) {
+ // Could in theory race between two threads if we are unlucky,
+ // but that is harmless, since they'll write the same data.
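+ // (If the race is a concern, a std::once_flag with std::call_once
+ // would remove it entirely.)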
+ init_lanczos_table();
+ }
}
SingleResamplePassEffect::~SingleResamplePassEffect()
assert(false);
}
+ ScalingWeights weights = calculate_scaling_weights(src_size, dst_size, zoom, offset);
+ src_bilinear_samples = weights.src_bilinear_samples;
+ num_loops = weights.num_loops;
+ slice_height = 1.0f / weights.num_loops;
+
+ // Encode as a two-component texture. Note the GL_REPEAT.
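+ // (GL_REPEAT is what lets us store only one loop's worth of weights
+ // and ask the card to repeat the texture for us, as described in
+ // calculate_scaling_weights below.)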
+ glActiveTexture(GL_TEXTURE0 + *sampler_num);
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, texnum);
+ check_error();
+ if (last_texture_width == -1) {
+ // Need to set this state the first time.
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+ check_error();
+ }
+
+ GLenum type, internal_format;
+ void *pixels;
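+ // Exactly one of the fp16 and fp32 weight sets should be present.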
+ assert((weights.bilinear_weights_fp16 == nullptr) != (weights.bilinear_weights_fp32 == nullptr));
+ if (weights.bilinear_weights_fp32 != nullptr) {
+ type = GL_FLOAT;
+ internal_format = GL_RG32F;
+ pixels = weights.bilinear_weights_fp32.get();
+ } else {
+ type = GL_HALF_FLOAT;
+ internal_format = GL_RG16F;
+ pixels = weights.bilinear_weights_fp16.get();
+ }
+
+ if (int(weights.src_bilinear_samples) == last_texture_width &&
+ int(weights.dst_samples) == last_texture_height &&
+ internal_format == last_texture_internal_format) {
+ // Texture dimensions and type are unchanged; it is more efficient
+ // to just update it rather than making an entirely new texture.
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, weights.src_bilinear_samples, weights.dst_samples, GL_RG, type, pixels);
+ } else {
+ glTexImage2D(GL_TEXTURE_2D, 0, internal_format, weights.src_bilinear_samples, weights.dst_samples, 0, GL_RG, type, pixels);
+ last_texture_width = weights.src_bilinear_samples;
+ last_texture_height = weights.dst_samples;
+ last_texture_internal_format = internal_format;
+ }
+ check_error();
+}
+
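+// Computes the weights for scaling src_size -> dst_size; exposed so that
+// they can also be computed outside ResampleEffect. A hypothetical
+// standalone call (arguments are src_size, dst_size, zoom, offset):
+//
+//   ScalingWeights w = calculate_scaling_weights(1280, 720, 1.0f, 0.0f);
+//
+// The result holds w.dst_samples rows of w.src_bilinear_samples taps each,
+// in exactly one of bilinear_weights_fp16 and bilinear_weights_fp32.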
+ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
+{
+ if (!lanczos_table_init_done) {
+ // Only needed if run from outside ResampleEffect.
+ init_lanczos_table();
+ }
+
// For many resamplings (e.g. 640 -> 1280), we will end up with the same
// set of samples over and over again in a loop. Thus, we can compute only
// the first such loop, and then ask the card to repeat the texture for us.
// This is both easier on the texture cache and lowers our CPU cost for
// generating the kernel somewhat.
float scaling_factor;
+ int num_loops;
if (fabs(zoom - 1.0f) < 1e-6) {
num_loops = gcd(src_size, dst_size);
scaling_factor = float(dst_size) / float(src_size);
} else {
num_loops = 1;
scaling_factor = zoom * float(dst_size) / float(src_size);
}
- slice_height = 1.0f / num_loops;
unsigned dst_samples = dst_size / num_loops;
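+ // (In the 640 -> 1280 example above: num_loops = gcd(640, 1280) = 640,
+ // so we compute only dst_samples = 1280 / 640 = 2 rows of weights.)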
// Sample the kernel in the right place. A diagram with a triangular kernel
float radius_scaling_factor = min(scaling_factor, 1.0f);
int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
int src_samples = int_radius * 2 + 1;
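+ // (E.g., for pure upscaling, radius_scaling_factor is 1.0, giving
+ // int_radius = 3 and src_samples = 7; 2x downscaling widens this to
+ // int_radius = 6 and src_samples = 13.)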
- Tap<float> *weights = new Tap<float>[dst_samples * src_samples];
+ unique_ptr<Tap<float>[]> weights(new Tap<float>[dst_samples * src_samples]);
float subpixel_offset = offset - lrintf(offset); // The part not covered by whole_pixel_offset.
assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
for (unsigned y = 0; y < dst_samples; ++y) {
int base_src_y = lrintf(center_src_y);
// Now sample <int_radius> pixels on each side around that point.
+ float inv_src_size = 1.0f / float(src_size);
for (int i = 0; i < src_samples; ++i) {
int src_y = base_src_y + i - int_radius;
- float weight = lanczos_weight(radius_scaling_factor * (src_y - center_src_y - subpixel_offset), LANCZOS_RADIUS);
+ float weight = lanczos_weight_cached(radius_scaling_factor * (src_y - center_src_y - subpixel_offset));
weights[y * src_samples + i].weight = weight * radius_scaling_factor;
- weights[y * src_samples + i].pos = (src_y + 0.5) / float(src_size);
+ weights[y * src_samples + i].pos = (src_y + 0.5f) * inv_src_size;
}
}
// Now make use of the bilinear filtering in the GPU to reduce the number of samples
// we need to make. Try fp16 first; if it's not accurate enough, we go to fp32.
- Tap<fp16_int_t> *bilinear_weights_fp16;
- src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
- Tap<float> *bilinear_weights_fp32 = NULL;
- bool fallback_to_fp32 = false;
+ // Our tolerance level for total error is a bit higher than the one for individual
+ // samples, since one would assume overall errors in the shape don't matter as much.
+ const float max_error = 2.0f / (255.0f * 255.0f);
+ unique_ptr<Tap<fp16_int_t>[]> bilinear_weights_fp16;
+ int src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, dst_samples, &bilinear_weights_fp16);
+ unique_ptr<Tap<float>[]> bilinear_weights_fp32;
double max_sum_sq_error_fp16 = 0.0;
for (unsigned y = 0; y < dst_samples; ++y) {
- optimize_sum_sq_error(
- weights + y * src_samples, src_samples,
- bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
- src_size);
double sum_sq_error_fp16 = compute_sum_sq_error(
- weights + y * src_samples, src_samples,
- bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
+ weights.get() + y * src_samples, src_samples,
+ bilinear_weights_fp16.get() + y * src_bilinear_samples, src_bilinear_samples,
src_size);
max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
- }
-
- // Our tolerance level for total error is a bit higher than the one for invididual
- // samples, since one would assume overall errors in the shape don't matter as much.
- if (max_sum_sq_error_fp16 > 2.0f / (255.0f * 255.0f)) {
- fallback_to_fp32 = true;
- src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
- for (unsigned y = 0; y < dst_samples; ++y) {
- optimize_sum_sq_error(
- weights + y * src_samples, src_samples,
- bilinear_weights_fp32 + y * src_bilinear_samples, src_bilinear_samples,
- src_size);
+ if (max_sum_sq_error_fp16 > max_error) {
+ break;
}
}
- // Encode as a two-component texture. Note the GL_REPEAT.
- glActiveTexture(GL_TEXTURE0 + *sampler_num);
- check_error();
- glBindTexture(GL_TEXTURE_2D, texnum);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
- check_error();
- if (fallback_to_fp32) {
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RG32F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_FLOAT, bilinear_weights_fp32);
- } else {
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RG16F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_HALF_FLOAT, bilinear_weights_fp16);
+ if (max_sum_sq_error_fp16 > max_error) {
+ bilinear_weights_fp16.reset();
+ src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, dst_samples, &bilinear_weights_fp32);
}
- check_error();
- delete[] weights;
- delete[] bilinear_weights_fp16;
- delete[] bilinear_weights_fp32;
+ ScalingWeights ret;
+ ret.src_bilinear_samples = src_bilinear_samples;
+ ret.dst_samples = dst_samples;
+ ret.num_loops = num_loops;
+ ret.bilinear_weights_fp16 = move(bilinear_weights_fp16);
+ ret.bilinear_weights_fp32 = move(bilinear_weights_fp32);
+ return ret;
}
void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
glBindTexture(GL_TEXTURE_2D, texnum);
check_error();
- set_uniform_int(glsl_program_num, prefix, "sample_tex", *sampler_num);
+ uniform_sample_tex = *sampler_num;
++*sampler_num;
- set_uniform_int(glsl_program_num, prefix, "num_samples", src_bilinear_samples);
- set_uniform_float(glsl_program_num, prefix, "num_loops", num_loops);
- set_uniform_float(glsl_program_num, prefix, "slice_height", slice_height);
+ uniform_num_samples = src_bilinear_samples;
+ uniform_num_loops = num_loops;
+ uniform_slice_height = slice_height;
// Instructions for how to convert integer sample numbers to positions in the weight texture.
- set_uniform_float(glsl_program_num, prefix, "sample_x_scale", 1.0f / src_bilinear_samples);
- set_uniform_float(glsl_program_num, prefix, "sample_x_offset", 0.5f / src_bilinear_samples);
+ uniform_sample_x_scale = 1.0f / src_bilinear_samples;
+ uniform_sample_x_offset = 0.5f / src_bilinear_samples;
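+ // (Sample i thus maps to x = (i + 0.5) / src_bilinear_samples, the
+ // center of texel i in the weight texture.)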
- float whole_pixel_offset;
if (direction == SingleResamplePassEffect::VERTICAL) {
- whole_pixel_offset = lrintf(offset) / float(input_height);
+ uniform_whole_pixel_offset = lrintf(offset) / float(input_height);
} else {
- whole_pixel_offset = lrintf(offset) / float(input_width);
+ uniform_whole_pixel_offset = lrintf(offset) / float(input_width);
}
- set_uniform_float(glsl_program_num, prefix, "whole_pixel_offset", whole_pixel_offset);
// We specifically do not want mipmaps on the input texture;
// they break minification.
Node *self = chain->find_node_for_effect(this);
- glActiveTexture(chain->get_input_sampler(self, 0));
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- check_error();
+ if (chain->has_input_sampler(self, 0)) {
+ glActiveTexture(chain->get_input_sampler(self, 0));
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ check_error();
+ }
}
} // namespace movit