Reuse the VAO across all phases.
diff --git a/resample_effect.cpp b/resample_effect.cpp
index f438a873b095114554bb1b53a41cebe263d16f4b..4dd3ea87e261086939a91e2d26d51ef52c813168 100644
--- a/resample_effect.cpp
+++ b/resample_effect.cpp
@@ -63,7 +63,22 @@ unsigned gcd(unsigned a, unsigned b)
 template<class DestFloat>
 unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, unsigned src_size, unsigned num_src_samples, unsigned max_samples_saved)
 {
+       // Cut off near-zero values at both sides.
        unsigned num_samples_saved = 0;
+       while (num_samples_saved < max_samples_saved &&
+              num_src_samples > 0 &&
+              fabs(src[0].weight) < 1e-6) {
+               ++src;
+               --num_src_samples;
+               ++num_samples_saved;
+       }
+       while (num_samples_saved < max_samples_saved &&
+              num_src_samples > 0 &&
+              fabs(src[num_src_samples - 1].weight) < 1e-6) {
+               --num_src_samples;
+               ++num_samples_saved;
+       }
+
        for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
                // Copy the sample directly; it will be overwritten later if we can combine.
                if (dst != NULL) {
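
The loops added above trim taps whose weight is effectively zero from both ends of the kernel, and every trimmed tap counts against the budget of samples we are allowed to save. A minimal standalone restatement of the same idea (a sketch only; the real Tap type and combine_samples() live elsewhere in movit):

#include <math.h>

// Simplified stand-in for movit's Tap type, just enough for the sketch.
template<class T>
struct Tap {
        T weight;
        T pos;
};

// Drop leading and trailing taps with near-zero weight, spending at most
// max_samples_saved of the savings budget. Returns how many taps were dropped.
static unsigned trim_near_zero_taps(const Tap<float> *&src, unsigned &num_src_samples,
                                    unsigned max_samples_saved)
{
        unsigned saved = 0;
        while (saved < max_samples_saved && num_src_samples > 0 &&
               fabs(src[0].weight) < 1e-6) {
                ++src;
                --num_src_samples;
                ++saved;
        }
        while (saved < max_samples_saved && num_src_samples > 0 &&
               fabs(src[num_src_samples - 1].weight) < 1e-6) {
                --num_src_samples;
                ++saved;
        }
        return saved;
}

For instance, a kernel {0.0, 0.2, 0.6, 0.2, 0.0} with max_samples_saved >= 2 shrinks to {0.2, 0.6, 0.2}, saving two samples before any pairwise combining is attempted.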
@@ -183,7 +198,7 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
        int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
        int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
        lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
-       upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5));
+       upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5) + 1);
 
        float* effective_weights = new float[upper_pos - lower_pos];
        for (int i = 0; i < upper_pos - lower_pos; ++i) {
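
The added +1 matters because upper_pos is used as an exclusive bound: effective_weights holds upper_pos - lower_pos entries indexed relative to lower_pos, so the texel carrying the last desired weight has to satisfy

\[
\operatorname{round}\!\left(\mathrm{pos}_{\mathrm{last}} \cdot \mathrm{size} - 0.5\right) < \text{upper\_pos}.
\]

Taking the max with that index plus one guarantees this; without it, the last desired weight could fall just outside the range the error loop accumulates over.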
@@ -224,112 +239,6 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
        return sum_sq_error;
 }
 
-// Given a predefined, fixed set of bilinear weight positions, try to optimize
-// their weights through some linear algebra. This can do a better job than
-// the weight calculation in combine_samples() because it can look at the entire
-// picture (an effective weight can sometimes be affected by multiple samples).
-// It will also optimize weights for non-combined samples, which is useful when
-// a sample happens in-between texels for numerical reasons.
-//
-// The math goes as follows: The desired result is a weighted sum, where the
-// weights are the coefficients in <weights>:
-//
-//   y = sum(c_j x_j, j)
-//
-// We try to approximate this by a different set of coefficients, which have
-// weights d_i and are placed at some fraction to the right of a source texel x_j.
-// This means it will influence two texels (x_j and x_{j+1}); generalizing this,
-// let us define that w_ij means the amount texel <j> influences bilinear weight
-// <i> (keeping in mind that w_ij = 0 for all but at most two different j).
-// This means the actually computed result is:
-//
-//   y' = sum(d_i w_ij x_j, j)
-//
-// We assume w_ij fixed and wish to find {d_i} so that y' gets as close to y
-// as possible. Specifically, let us consider the sum of squred errors of the
-// coefficients:
-//
-//   ε² = sum((sum( d_i w_ij, i ) - c_j)², j)
-//
-// The standard trick, which also applies just fine here, is to differentiate
-// the error with respect to each variable we wish to optimize, and set each
-// such expression to zero. Solving this equation set (which we can do efficiently
-// by letting Eigen invert a sparse matrix for us) yields the minimum possible
-// error. To see the form each such equation takes, pick any value k and
-// differentiate the expression by d_k:
-//
-//   ∂(ε²)/∂(d_k) = sum(2(sum( d_i w_ij, i ) - c_j) w_kj, j)
-//
-// Setting this expression equal to zero, dropping the irrelevant factor 2 and
-// rearranging yields:
-//
-//   sum(w_kj sum( d_i w_ij, i ), j) = sum(w_kj c_j, j)
-//
-// where again, we remember where the sums over j are over at most two elements,
-// since w_kj is nonzero for at most two values of j.
-template<class T>
-void optimize_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
-                           Tap<T>* bilinear_weights, unsigned num_bilinear_weights,
-                           unsigned size)
-{
-       // Find the range of the desired weights.
-       int c_lower_pos = lrintf(weights[0].pos * size - 0.5);
-       int c_upper_pos = lrintf(weights[num_weights - 1].pos * size - 0.5) + 1;
-
-       SparseMatrix<float> A(num_bilinear_weights, num_bilinear_weights);
-       SparseVector<float> b(num_bilinear_weights);
-
-       // Convert each bilinear weight to the (x, frac) form for less junk in the code below.
-       int* pos = new int[num_bilinear_weights];
-       float* fracs = new float[num_bilinear_weights];
-       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
-               const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
-               const float f = pixel_pos - floor(pixel_pos);
-               pos[i] = int(floor(pixel_pos));
-               fracs[i] = lrintf(f / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
-       }
-
-       // The index ordering is a bit unusual to fit better with the
-       // notation in the derivation above.
-       for (unsigned k = 0; k < num_bilinear_weights; ++k) {
-               for (int j = pos[k]; j <= pos[k] + 1; ++j) {
-                       const float w_kj = (j == pos[k]) ? (1.0f - fracs[k]) : fracs[k];
-                       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
-                               float w_ij;
-                               if (j == pos[i]) {
-                                       w_ij = 1.0f - fracs[i];
-                               } else if (j == pos[i] + 1) {
-                                       w_ij = fracs[i];
-                               } else {
-                                       // w_ij = 0
-                                       continue;
-                               }
-                               A.coeffRef(i, k) += w_kj * w_ij;
-                       }
-                       float c_j;
-                       if (j >= c_lower_pos && j < c_upper_pos) {
-                               c_j = weights[j - c_lower_pos].weight;
-                       } else {
-                               c_j = 0.0f;
-                       }
-                       b.coeffRef(k) += w_kj * c_j;
-               }
-       }
-       delete[] pos;
-       delete[] fracs;
-
-       A.makeCompressed();
-       SparseQR<SparseMatrix<float>, COLAMDOrdering<int> > qr(A);
-       assert(qr.info() == Success);
-       SparseMatrix<float> new_weights = qr.solve(b);
-       assert(qr.info() == Success);
-
-       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
-               bilinear_weights[i].weight = from_fp64<T>(new_weights.coeff(i, 0));
-       }
-       normalize_sum(bilinear_weights, num_bilinear_weights);
-}
-
 }  // namespace
 
 ResampleEffect::ResampleEffect()
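
For reference, the derivation in the comment block removed above is the standard least-squares setup. Collecting the per-texel influences w_ij into a matrix W, the desired coefficients c_j into a vector c and the bilinear weights d_i into a vector d, it reads

\[
\varepsilon^2 = \lVert W^{\mathsf T} d - c \rVert^2,
\qquad
\frac{\partial \varepsilon^2}{\partial d_k} = 0 \;\text{for all } k
\;\Longleftrightarrow\;
W W^{\mathsf T} d = W c,
\]

which is exactly the sparse system that the deleted optimize_sum_sq_error() assembled (A = W Wᵀ, b = W c) and handed to Eigen's SparseQR.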
@@ -471,7 +380,8 @@ SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
          last_output_width(-1),
          last_output_height(-1),
          last_offset(0.0 / 0.0),  // NaN.
-         last_zoom(0.0 / 0.0)  // NaN.
+         last_zoom(0.0 / 0.0),  // NaN.
+         last_texture_width(-1), last_texture_height(-1)
 {
        register_int("direction", (int *)&direction);
        register_int("input_width", &input_width);
@@ -480,6 +390,13 @@ SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
        register_int("output_height", &output_height);
        register_float("offset", &offset);
        register_float("zoom", &zoom);
+       register_uniform_sampler2d("sample_tex", &uniform_sample_tex);
+       register_uniform_int("num_samples", &uniform_num_samples);  // FIXME: What about GLSL pre-1.30?
+       register_uniform_float("num_loops", &uniform_num_loops);
+       register_uniform_float("slice_height", &uniform_slice_height);
+       register_uniform_float("sample_x_scale", &uniform_sample_x_scale);
+       register_uniform_float("sample_x_offset", &uniform_sample_x_offset);
+       register_uniform_float("whole_pixel_offset", &uniform_whole_pixel_offset);
 
        glGenTextures(1, &texnum);
 }
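
register_uniform_*() hands the framework a pointer to a member variable instead of the effect pushing values by name every time it runs; the values themselves are filled in later, in set_gl_state() below. A minimal sketch of that general pattern, with hypothetical names rather than movit's actual implementation (it also assumes the usual prefix_name naming for uniforms in the generated shader):

#include <epoxy/gl.h>  // movit links against libepoxy; any GL loader works for the sketch
#include <string>
#include <vector>

// One registered float uniform: a shader name plus a pointer the effect keeps up to date.
struct RegisteredFloat {
        std::string name;
        const float *value;
};

class UniformRegistrySketch {
public:
        void register_uniform_float(const std::string &name, const float *value)
        {
                floats.push_back(RegisteredFloat{name, value});
        }

        // Once the effect has written its current values, everything can be
        // uploaded from one central place instead of ad-hoc set_uniform_* calls.
        void upload(GLuint glsl_program_num, const std::string &prefix) const
        {
                for (const RegisteredFloat &u : floats) {
                        const std::string full_name = prefix + "_" + u.name;
                        GLint location = glGetUniformLocation(glsl_program_num, full_name.c_str());
                        if (location != -1) {
                                glUniform1f(location, *u.value);
                        }
                }
        }

private:
        std::vector<RegisteredFloat> floats;
};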
@@ -616,34 +533,28 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
 
        // Now make use of the bilinear filtering in the GPU to reduce the number of samples
        // we need to make. Try fp16 first; if it's not accurate enough, we go to fp32.
+       // Our tolerance level for total error is a bit higher than the one for individual
+       // samples, since one would assume overall errors in the shape don't matter as much.
+       const float max_error = 2.0f / (255.0f * 255.0f);
        Tap<fp16_int_t> *bilinear_weights_fp16;
        src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
        Tap<float> *bilinear_weights_fp32 = NULL;
        bool fallback_to_fp32 = false;
        double max_sum_sq_error_fp16 = 0.0;
        for (unsigned y = 0; y < dst_samples; ++y) {
-               optimize_sum_sq_error(
-                       weights + y * src_samples, src_samples,
-                       bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
-                       src_size);
                double sum_sq_error_fp16 = compute_sum_sq_error(
                        weights + y * src_samples, src_samples,
                        bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
                        src_size);
                max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
+               if (max_sum_sq_error_fp16 > max_error) {
+                       break;
+               }
        }
 
-       // Our tolerance level for total error is a bit higher than the one for invididual
-       // samples, since one would assume overall errors in the shape don't matter as much.
-       if (max_sum_sq_error_fp16 > 2.0f / (255.0f * 255.0f)) {
+       if (max_sum_sq_error_fp16 > max_error) {
                fallback_to_fp32 = true;
                src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
-               for (unsigned y = 0; y < dst_samples; ++y) {
-                       optimize_sum_sq_error(
-                               weights + y * src_samples, src_samples,
-                               bilinear_weights_fp32 + y * src_bilinear_samples, src_bilinear_samples,
-                               src_size);
-               }
        }
 
        // Encode as a two-component texture. Note the GL_REPEAT.
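
The early break above is safe because max_sum_sq_error_fp16 only ever grows; once it exceeds the budget, the remaining rows cannot change the decision. The budget itself is twice the square of one 8-bit quantization step:

\[
\text{max\_error} = \frac{2}{255^2} \approx 3.1 \times 10^{-5},
\]

i.e. a row's summed squared error may be at most twice (1/255)² before we give up on fp16 and redo the weights in fp32.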
@@ -651,16 +562,39 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
        check_error();
        glBindTexture(GL_TEXTURE_2D, texnum);
        check_error();
-       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
-       check_error();
-       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
-       check_error();
-       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
-       check_error();
+       if (last_texture_width == -1) {
+               // Need to set this state the first time.
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+               check_error();
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+               check_error();
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+               check_error();
+       }
+
+       GLenum type, internal_format;
+       void *pixels;
        if (fallback_to_fp32) {
-               glTexImage2D(GL_TEXTURE_2D, 0, GL_RG32F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_FLOAT, bilinear_weights_fp32);
+               type = GL_FLOAT;
+               internal_format = GL_RG32F;
+               pixels = bilinear_weights_fp32;
+       } else {
+               type = GL_HALF_FLOAT;
+               internal_format = GL_RG16F;
+               pixels = bilinear_weights_fp16;
+       }
+
+       if (int(src_bilinear_samples) == last_texture_width &&
+           int(dst_samples) == last_texture_height &&
+           internal_format == last_texture_internal_format) {
+               // Texture dimensions and type are unchanged; it is more efficient
+               // to just update it rather than making an entirely new texture.
+               glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, src_bilinear_samples, dst_samples, GL_RG, type, pixels);
        } else {
-               glTexImage2D(GL_TEXTURE_2D, 0, GL_RG16F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_HALF_FLOAT, bilinear_weights_fp16);
+               glTexImage2D(GL_TEXTURE_2D, 0, internal_format, src_bilinear_samples, dst_samples, 0, GL_RG, type, pixels);
+               last_texture_width = src_bilinear_samples;
+               last_texture_height = dst_samples;
+               last_texture_internal_format = internal_format;
        }
        check_error();
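
The allocate-versus-update split above is the usual texture reuse pattern: glTexSubImage2D only replaces pixel data, so it applies whenever the cached dimensions and internal format still match, while glTexImage2D re-specifies the storage. Condensed into a standalone sketch (a hypothetical free function, same GL calls as the hunk above):

#include <epoxy/gl.h>

// Upload new weights into an existing texture if its storage still fits,
// otherwise (re)allocate it. The last_* arguments cache what the texture
// currently holds, mirroring the last_texture_* members added in this patch.
static void upload_weight_texture(GLuint texnum, GLsizei width, GLsizei height,
                                  GLenum internal_format, GLenum type, const void *pixels,
                                  GLsizei *last_width, GLsizei *last_height,
                                  GLenum *last_internal_format)
{
        glBindTexture(GL_TEXTURE_2D, texnum);
        if (width == *last_width && height == *last_height &&
            internal_format == *last_internal_format) {
                // Same dimensions and format; just overwrite the contents.
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RG, type, pixels);
        } else {
                // Storage changed; allocate anew and remember what we made.
                glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0,
                             GL_RG, type, pixels);
                *last_width = width;
                *last_height = height;
                *last_internal_format = internal_format;
        }
}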
 
@@ -698,23 +632,21 @@ void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const strin
        glBindTexture(GL_TEXTURE_2D, texnum);
        check_error();
 
-       set_uniform_int(glsl_program_num, prefix, "sample_tex", *sampler_num);
+       uniform_sample_tex = *sampler_num;
        ++*sampler_num;
-       set_uniform_int(glsl_program_num, prefix, "num_samples", src_bilinear_samples);
-       set_uniform_float(glsl_program_num, prefix, "num_loops", num_loops);
-       set_uniform_float(glsl_program_num, prefix, "slice_height", slice_height);
+       uniform_num_samples = src_bilinear_samples;
+       uniform_num_loops = num_loops;
+       uniform_slice_height = slice_height;
 
        // Instructions for how to convert integer sample numbers to positions in the weight texture.
-       set_uniform_float(glsl_program_num, prefix, "sample_x_scale", 1.0f / src_bilinear_samples);
-       set_uniform_float(glsl_program_num, prefix, "sample_x_offset", 0.5f / src_bilinear_samples);
+       uniform_sample_x_scale = 1.0f / src_bilinear_samples;
+       uniform_sample_x_offset = 0.5f / src_bilinear_samples;
 
-       float whole_pixel_offset;
        if (direction == SingleResamplePassEffect::VERTICAL) {
-               whole_pixel_offset = lrintf(offset) / float(input_height);
+               uniform_whole_pixel_offset = lrintf(offset) / float(input_height);
        } else {
-               whole_pixel_offset = lrintf(offset) / float(input_width);
+               uniform_whole_pixel_offset = lrintf(offset) / float(input_width);
        }
-       set_uniform_float(glsl_program_num, prefix, "whole_pixel_offset", whole_pixel_offset);
 
        // We specifically do not want mipmaps on the input texture;
        // they break minification.
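
The sample_x_scale and sample_x_offset values set above give the shader a direct mapping from an integer sample index i to a texture coordinate that lands on the center of texel i in the weight texture:

\[
x(i) \;=\; i \cdot \frac{1}{\text{src\_bilinear\_samples}} + \frac{0.5}{\text{src\_bilinear\_samples}}
      \;=\; \frac{i + 0.5}{\text{src\_bilinear\_samples}}.
\]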