Bring the variable names in optimize_sum_sq_error() closer to the comments.
[movit] / resample_effect.cpp
index 792d86134ac54f875e5724bd63f715b7434d916c..7e44e9ba89424477a33370020039d32f0c37d723 100644 (file)
@@ -7,19 +7,30 @@
 #include <math.h>
 #include <stdio.h>
 #include <algorithm>
+#include <Eigen/Sparse>
+#include <Eigen/SparseQR>
+#include <Eigen/OrderingMethods>
 
 #include "effect_chain.h"
 #include "effect_util.h"
 #include "fp16.h"
+#include "init.h"
 #include "resample_effect.h"
 #include "util.h"
 
+using namespace Eigen;
 using namespace std;
 
 namespace movit {
 
 namespace {
 
+template<class T>
+struct Tap {
+       T weight;
+       T pos;
+};
+
 float sinc(float x)
 {
        if (fabs(x) < 1e-6) {
@@ -49,14 +60,15 @@ unsigned gcd(unsigned a, unsigned b)
        return a;
 }
 
-unsigned combine_samples(float *src, float *dst, unsigned num_src_samples, unsigned max_samples_saved)
+template<class DestFloat>
+unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, unsigned src_size, unsigned num_src_samples, unsigned max_samples_saved)
 {
        unsigned num_samples_saved = 0;
        for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
                // Copy the sample directly; it will be overwritten later if we can combine.
                if (dst != NULL) {
-                       dst[j * 2 + 0] = src[i * 2 + 0];
-                       dst[j * 2 + 1] = src[i * 2 + 1];
+                       dst[j].weight = convert_float<float, DestFloat>(src[i].weight);
+                       dst[j].pos = convert_float<float, DestFloat>(src[i].pos);
                }
 
                if (i == num_src_samples - 1) {
@@ -69,33 +81,34 @@ unsigned combine_samples(float *src, float *dst, unsigned num_src_samples, unsig
                        continue;
                }
 
-               float w1 = src[i * 2 + 0];
-               float w2 = src[(i + 1) * 2 + 0];
+               float w1 = src[i].weight;
+               float w2 = src[i + 1].weight;
                if (w1 * w2 < 0.0f) {
                        // Differing signs; cannot combine.
                        continue;
                }
 
-               float pos1 = src[i * 2 + 1];
-               float pos2 = src[(i + 1) * 2 + 1];
+               float pos1 = src[i].pos;
+               float pos2 = src[i + 1].pos;
                assert(pos2 > pos1);
 
-               float offset, total_weight, sum_sq_error;
-               combine_two_samples(w1, w2, &offset, &total_weight, &sum_sq_error);
+               fp16_int_t pos, total_weight;
+               float sum_sq_error;
+               combine_two_samples(w1, w2, pos1, pos2, src_size, &pos, &total_weight, &sum_sq_error);
 
                // If the interpolation error is larger than that of about sqrt(2) of
                // a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
                // but since the artifacts are not really random, they can get quite
                // visible. On the other hand, going to 0.25f, I can see no change at
                // all with 8-bit output, so it would not seem to be worth it.)
-               if (sum_sq_error > 0.5f / (256.0f * 256.0f)) {
+               if (sum_sq_error > 0.5f / (255.0f * 255.0f)) {
                        continue;
                }
 
                // OK, we can combine this and the next sample.
                if (dst != NULL) {
-                       dst[j * 2 + 0] = total_weight;
-                       dst[j * 2 + 1] = pos1 + offset * (pos2 - pos1);
+                       dst[j].weight = total_weight;
+                       dst[j].pos = pos;
                }
 
                ++i;  // Skip the next sample.
@@ -104,6 +117,219 @@ unsigned combine_samples(float *src, float *dst, unsigned num_src_samples, unsig
        return num_samples_saved;
 }
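For intuition, the merge that combine_two_samples() performs (declared in effect_util.h per the includes above; its body is outside this diff) comes down to one tap of weight w1 + w2, placed so that the GPU's bilinear filter splits it back in the ratio w1 : w2, with the position then snapped to the subtexel grid. A standalone sketch of that arithmetic, with made-up values and a hypothetical 1/64-texel precision:

```cpp
// Standalone sketch (not movit code): merging two neighboring taps into one
// bilinear tap, and the squared error the position rounding introduces.
#include <cmath>
#include <cstdio>

int main()
{
	// Two ideal taps on adjacent texels, with the same sign.
	const float w1 = 0.6f, w2 = 0.3f;

	// One combined tap, placed so bilinear filtering splits it as w1 : w2.
	const float total_weight = w1 + w2;
	const float offset = w2 / total_weight;  // Fraction of the way from texel 1 to texel 2.

	// The GPU only honors a limited number of subtexel positions; assume 1/64.
	const float subpixel_precision = 1.0f / 64.0f;
	const float rounded = std::rint(offset / subpixel_precision) * subpixel_precision;

	// Effective weights the rounded tap yields on the two texels.
	const float eff1 = total_weight * (1.0f - rounded);
	const float eff2 = total_weight * rounded;

	const float sum_sq_error = (eff1 - w1) * (eff1 - w1) + (eff2 - w2) * (eff2 - w2);
	printf("sum_sq_error = %g, threshold = %g\n",
	       sum_sq_error, 0.5f / (255.0f * 255.0f));
}
```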
 
+// Normalize so that the sum becomes one. Note that we do it twice;
+// this sometimes helps a tiny little bit when we have many samples.
+template<class T>
+void normalize_sum(Tap<T>* vals, unsigned num)
+{
+       for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
+               double sum = 0.0;
+               for (unsigned i = 0; i < num; ++i) {
+                       sum += to_fp64(vals[i].weight);
+               }
+               for (unsigned i = 0; i < num; ++i) {
+                       vals[i].weight = from_fp64<T>(to_fp64(vals[i].weight) / sum);
+               }
+       }
+}
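The second pass exists because dividing by the sum and then re-quantizing to a low-precision type such as fp16 can itself nudge the sum away from 1.0 again. A standalone sketch of that mechanism, with a crude 1/1024-step quantizer standing in for the from_fp64&lt;fp16_int_t&gt; rounding:

```cpp
// Standalone sketch (not movit code): normalization under re-quantization.
#include <cmath>
#include <cstdio>

// Crude stand-in for fp16 rounding: snap to steps of 1/1024.
float quantize(double x)
{
	return float(std::rint(x * 1024.0) / 1024.0);
}

int main()
{
	// Quantized weights whose sum starts out slightly off 1.0.
	float w[3] = { quantize(0.21), quantize(0.33), quantize(0.47) };
	for (int pass = 0; pass < 2; ++pass) {
		double sum = 0.0;
		for (int i = 0; i < 3; ++i) sum += w[i];
		for (int i = 0; i < 3; ++i) {
			// The division fixes the sum exactly, but quantizing the
			// result can reintroduce a small drift; when that happens,
			// the second pass tightens it.
			w[i] = quantize(w[i] / sum);
		}
		printf("after pass %d: sum = %.10f\n", pass + 1, w[0] + w[1] + w[2]);
	}
}
```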
+
+// Make use of the bilinear filtering in the GPU to reduce the number of samples
+// we need to make. This is a bit more complex than BlurEffect since we cannot combine
+// two neighboring samples if their weights have differing signs, so we first need to
+// figure out the maximum number of samples. Then, we downconvert all the weights to
+// that number -- we could have gone for a variable-length system, but this is simpler,
+// and the gains would probably be offset by the extra cost of checking when to stop.
+//
+// The greedy strategy for combining samples is optimal.
+template<class DestFloat>
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, Tap<DestFloat> **bilinear_weights)
+{
+       int src_bilinear_samples = 0;
+       for (unsigned y = 0; y < dst_samples; ++y) {
+               unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, NULL, src_size, src_samples, UINT_MAX);
+               src_bilinear_samples = max<int>(src_bilinear_samples, src_samples - num_samples_saved);
+       }
+
+       // Now that we know the right width, actually combine the samples.
+       *bilinear_weights = new Tap<DestFloat>[dst_samples * src_bilinear_samples];
+       for (unsigned y = 0; y < dst_samples; ++y) {
+               Tap<DestFloat> *bilinear_weights_ptr = *bilinear_weights + y * src_bilinear_samples;
+               unsigned num_samples_saved = combine_samples(
+                       weights + y * src_samples,
+                       bilinear_weights_ptr,
+                       src_size,
+                       src_samples,
+                       src_samples - src_bilinear_samples);
+               assert(int(src_samples) - int(num_samples_saved) == src_bilinear_samples);
+               normalize_sum(bilinear_weights_ptr, src_bilinear_samples);
+       }
+       return src_bilinear_samples;
+}
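The calling pattern mirrors the two passes inside the function: combine_samples() is first run with dst == NULL purely to count, then against a real destination. A hypothetical fragment, condensed from what update_texture() does further down in this diff (not self-contained; it reuses names from this file):

```cpp
// Hypothetical fragment mirroring update_texture() below: 'weights' holds
// dst_samples rows of src_samples ideal taps each, laid out row by row.
Tap<fp16_int_t> *bilinear_weights_fp16;
int src_bilinear_samples = combine_many_samples(
	weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
// bilinear_weights_fp16 now holds dst_samples * src_bilinear_samples taps,
// each row normalized to sum to one; free it with delete[] when done.
```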
+
+// Compute the sum of squared errors between the ideal weights (which are
+// assumed to fall exactly on pixel centers) and the weights that result
+// from sampling at <bilinear_weights>. The primary reason for the difference
+// is inaccuracy in the sampling positions, both due to limited precision
+// in storing them (already inherent in sending them in as fp16_int_t)
+// and in subtexel sampling precision (which we calculate in this function).
+template<class T>
+double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
+                            const Tap<T>* bilinear_weights, unsigned num_bilinear_weights,
+                            unsigned size)
+{
+       // Find the effective range of the bilinear-optimized kernel.
+       // Due to rounding of the positions, this is not necessarily the same
+       // as the intended range (i.e., the range of the original weights).
+       int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
+       int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
+       lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
+       upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5));
+
+       float* effective_weights = new float[upper_pos - lower_pos];
+       for (int i = 0; i < upper_pos - lower_pos; ++i) {
+               effective_weights[i] = 0.0f;
+       }
+
+       // Now find the effective weights that result from this sampling.
+       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
+               const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+               const int x0 = int(floor(pixel_pos)) - lower_pos;
+               const int x1 = x0 + 1;
+               const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
+
+               assert(x0 >= 0);
+               assert(x1 >= 0);
+               assert(x0 < upper_pos - lower_pos);
+               assert(x1 < upper_pos - lower_pos);
+
+               effective_weights[x0] += to_fp64(bilinear_weights[i].weight) * (1.0 - f);
+               effective_weights[x1] += to_fp64(bilinear_weights[i].weight) * f;
+       }
+
+       // Subtract the desired weights to get the error.
+       for (unsigned i = 0; i < num_weights; ++i) {
+               const int x = lrintf(weights[i].pos * size - 0.5f) - lower_pos;
+               assert(x >= 0);
+               assert(x < upper_pos - lower_pos);
+
+               effective_weights[x] -= weights[i].weight;
+       }
+
+       double sum_sq_error = 0.0;
+       for (unsigned i = 0; i < num_weights; ++i) {
+               sum_sq_error += effective_weights[i] * effective_weights[i];
+       }
+
+       delete[] effective_weights;
+       return sum_sq_error;
+}
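A worked example of what this measures: each bilinear tap spreads its weight as (1 - f, f) over a texel pair, overlapping taps accumulate on shared texels, and the ideal weights are then subtracted from the result. A standalone sketch with made-up numbers (the same toy setup reappears in the least-squares sketch further down):

```cpp
// Standalone sketch (not movit code): two bilinear taps whose supports
// overlap on a shared texel, accumulated the way compute_sum_sq_error()
// does above.
#include <cstdio>

int main()
{
	// Ideal taps on three texels.
	const float ideal[3] = { 0.2f, 0.5f, 0.3f };

	// Two bilinear taps: one 1/4 past texel 0, one halfway between 1 and 2.
	const float d[2]    = { 0.7f, 0.6f };   // Tap weights.
	const int   x0[2]   = { 0, 1 };         // Left texel of each tap.
	const float frac[2] = { 0.25f, 0.5f };  // Fractional position within the pair.

	float effective[3] = { 0.0f, 0.0f, 0.0f };
	for (int i = 0; i < 2; ++i) {
		effective[x0[i]]     += d[i] * (1.0f - frac[i]);  // Left texel.
		effective[x0[i] + 1] += d[i] * frac[i];           // Right texel.
	}

	// Subtract the ideal weights and sum the squared differences.
	double sum_sq_error = 0.0;
	for (int i = 0; i < 3; ++i) {
		const double err = effective[i] - ideal[i];
		sum_sq_error += err * err;
	}
	printf("sum_sq_error = %g\n", sum_sq_error);
}
```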
+
+// Given a predefined, fixed set of bilinear weight positions, try to optimize
+// their weights through some linear algebra. This can do a better job than
+// the weight calculation in combine_samples() because it can look at the entire
+// picture (an effective weight can sometimes be affected by multiple samples).
+// It will also optimize weights for non-combined samples, which is useful when
+// a sample happens to land in between texels for numerical reasons.
+//
+// The math goes as follows: The desired result is a weighted sum, where the
+// weights are the coefficients in <weights>:
+//
+//   y = sum(c_j x_j, j)
+//
+// We try to approximate this by a different set of coefficients, which have
+// weights d_i and are placed at some fraction to the right of a source texel x_j.
+// This means it will influence two texels (x_j and x_{j+1}); generalizing this,
+// let us define that w_ij means the amount texel <j> influences bilinear weight
+// <i> (keeping in mind that w_ij = 0 for all but at most two different j).
+// This means the actually computed result is:
+//
+//   y' = sum(d_i w_ij x_j, j)
+//
+// We assume w_ij fixed and wish to find {d_i} so that y' gets as close to y
+// as possible. Specifically, let us consider the sum of squared errors of the
+// coefficients:
+//
+//   ε² = sum((sum( d_i w_ij, i ) - c_j)², j)
+//
+// The standard trick, which also applies just fine here, is to differentiate
+// the error with respect to each variable we wish to optimize, and set each
+// such expression to zero. Solving this equation set (which we can do efficiently
+// by letting Eigen invert a sparse matrix for us) yields the minimum possible
+// error. To see the form each such equation takes, pick any value k and
+// differentiate the expression by d_k:
+//
+//   ∂(ε²)/∂(d_k) = sum(2(sum( d_i w_ij, i ) - c_j) w_kj, j)
+//
+// Setting this expression equal to zero, dropping the irrelevant factor 2 and
+// rearranging yields:
+//
+//   sum(w_kj sum( d_i w_ij, i ), j) = sum(w_kj c_j, j)
+//
+// where, again, we remember that the sums over j run over at most two elements,
+// since w_ij is nonzero for at most two values of j.
+template<class T>
+void optimize_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
+                           Tap<T>* bilinear_weights, unsigned num_bilinear_weights,
+                           unsigned size)
+{
+       // Find the range of the desired weights.
+       int c_lower_pos = lrintf(weights[0].pos * size - 0.5);
+       int c_upper_pos = lrintf(weights[num_weights - 1].pos * size - 0.5) + 1;
+
+       SparseMatrix<float> A(num_bilinear_weights, num_bilinear_weights);
+       SparseVector<float> b(num_bilinear_weights);
+
+       // Convert each bilinear weight to the (x, frac) form for less junk in the code below.
+       int* pos = new int[num_bilinear_weights];
+       float* fracs = new float[num_bilinear_weights];
+       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
+               const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+               const float f = pixel_pos - floor(pixel_pos);
+               pos[i] = int(floor(pixel_pos));
+               fracs[i] = lrintf(f / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
+       }
+
+       // The index ordering is a bit unusual to fit better with the
+       // notation in the derivation above.
+       for (unsigned k = 0; k < num_bilinear_weights; ++k) {
+               for (int j = pos[k]; j <= pos[k] + 1; ++j) {
+                       const float w_kj = (j == pos[k]) ? (1.0f - fracs[k]) : fracs[k];
+                       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
+                               float w_ij;
+                               if (j == pos[i]) {
+                                       w_ij = 1.0f - fracs[i];
+                               } else if (j == pos[i] + 1) {
+                                       w_ij = fracs[i];
+                               } else {
+                                       // w_ij = 0
+                                       continue;
+                               }
+                               A.coeffRef(i, k) += w_kj * w_ij;
+                       }
+                       float c_j;
+                       if (j >= c_lower_pos && j < c_upper_pos) {
+                               c_j = weights[j - c_lower_pos].weight;
+                       } else {
+                               c_j = 0.0f;
+                       }
+                       b.coeffRef(k) += w_kj * c_j;
+               }
+       }
+       delete[] pos;
+       delete[] fracs;
+
+       A.makeCompressed();
+       SparseQR<SparseMatrix<float>, COLAMDOrdering<int> > qr(A);
+       assert(qr.info() == Success);
+       SparseMatrix<float> new_weights = qr.solve(b);
+       assert(qr.info() == Success);
+
+       for (unsigned i = 0; i < num_bilinear_weights; ++i) {
+               bilinear_weights[i].weight = from_fp64<T>(new_weights.coeff(i, 0));
+       }
+       normalize_sum(bilinear_weights, num_bilinear_weights);
+}
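In dense matrix form, the equation set above is just the normal equations (W Wᵀ) d = W c with W(i, j) = w_ij, so on a toy problem the same math fits in a few lines. A standalone sketch (not movit code) using Eigen's dense module and the toy setup from the compute_sum_sq_error() sketch above; the real code uses SparseQR instead, which exploits the at-most-two-nonzeros-per-row structure:

```cpp
// Standalone sketch (not movit code): least-squares weights for fixed
// bilinear positions, solved densely. W(i, j) says how much texel j is
// influenced by bilinear tap i; c holds the ideal per-texel weights.
#include <Eigen/Dense>
#include <cstdio>

int main()
{
	using namespace Eigen;

	// Two bilinear taps over three texels; each row has at most two nonzeros.
	MatrixXf W(2, 3);
	W << 0.75f, 0.25f, 0.0f,   // Tap 0 sits 1/4 past texel 0.
	     0.0f,  0.5f,  0.5f;   // Tap 1 sits halfway between texels 1 and 2.

	const Vector3f c(0.2f, 0.5f, 0.3f);  // Ideal weights on the three texels.

	// Minimize |W^T d - c|^2 by solving the normal equations (W W^T) d = W c.
	const Vector2f d = (W * W.transpose()).ldlt().solve(W * c);

	printf("d = (%f, %f)\n", d(0), d(1));
}
```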
+
 }  // namespace
 
 ResampleEffect::ResampleEffect()
@@ -370,7 +596,7 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
        float radius_scaling_factor = min(scaling_factor, 1.0f);
        int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
        int src_samples = int_radius * 2 + 1;
-       float *weights = new float[dst_samples * src_samples * 2];
+       Tap<float> *weights = new Tap<float>[dst_samples * src_samples];
        float subpixel_offset = offset - lrintf(offset);  // The part not covered by whole_pixel_offset.
        assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
        for (unsigned y = 0; y < dst_samples; ++y) {
@@ -383,55 +609,40 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
                for (int i = 0; i < src_samples; ++i) {
                        int src_y = base_src_y + i - int_radius;
                        float weight = lanczos_weight(radius_scaling_factor * (src_y - center_src_y - subpixel_offset), LANCZOS_RADIUS);
-                       weights[(y * src_samples + i) * 2 + 0] = weight * radius_scaling_factor;
-                       weights[(y * src_samples + i) * 2 + 1] = (src_y + 0.5) / float(src_size);
+                       weights[y * src_samples + i].weight = weight * radius_scaling_factor;
+                       weights[y * src_samples + i].pos = (src_y + 0.5) / float(src_size);
                }
        }
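The lanczos_weight() call above refers to the kernel defined earlier in resample_effect.cpp, outside the hunks shown here. For reference, it is the standard Lanczos window; in sketch form (the in-file sinc() near the top of this diff takes an argument already in radians):

```cpp
// Sketch of the standard Lanczos kernel assumed by the loop above; the real
// lanczos_weight() is defined earlier in this file. Support is |x| <= a.
float lanczos_weight_sketch(float x, float a)
{
	if (fabs(x) > a) {
		return 0.0f;
	}
	return sinc(M_PI * x) * sinc(M_PI * x / a);
}
```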
 
        // Now make use of the bilinear filtering in the GPU to reduce the number of samples
-       // we need to make. This is a bit more complex than BlurEffect since we cannot combine
-       // two neighboring samples if their weights have differing signs, so we first need to
-       // figure out the maximum number of samples. Then, we downconvert all the weights to
-       // that number -- we could have gone for a variable-length system, but this is simpler,
-       // and the gains would probably be offset by the extra cost of checking when to stop.
-       //
-       // The greedy strategy for combining samples is optimal.
-       src_bilinear_samples = 0;
+       // we need to make. Try fp16 first; if it's not accurate enough, we go to fp32.
+       Tap<fp16_int_t> *bilinear_weights_fp16;
+       src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
+       Tap<float> *bilinear_weights_fp32 = NULL;
+       bool fallback_to_fp32 = false;
+       double max_sum_sq_error_fp16 = 0.0;
        for (unsigned y = 0; y < dst_samples; ++y) {
-               unsigned num_samples_saved = combine_samples(weights + (y * src_samples) * 2, NULL, src_samples, UINT_MAX);
-               src_bilinear_samples = max<int>(src_bilinear_samples, src_samples - num_samples_saved);
+               optimize_sum_sq_error(
+                       weights + y * src_samples, src_samples,
+                       bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
+                       src_size);
+               double sum_sq_error_fp16 = compute_sum_sq_error(
+                       weights + y * src_samples, src_samples,
+                       bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
+                       src_size);
+               max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
        }
 
-       // Now that we know the right width, actually combine the samples.
-       float *bilinear_weights = new float[dst_samples * src_bilinear_samples * 2];
-       fp16_int_t *bilinear_weights_fp16 = new fp16_int_t[dst_samples * src_bilinear_samples * 2];
-       for (unsigned y = 0; y < dst_samples; ++y) {
-               float *bilinear_weights_ptr = bilinear_weights + (y * src_bilinear_samples) * 2;
-               fp16_int_t *bilinear_weights_fp16_ptr = bilinear_weights_fp16 + (y * src_bilinear_samples) * 2;
-               unsigned num_samples_saved = combine_samples(
-                       weights + (y * src_samples) * 2,
-                       bilinear_weights_ptr,
-                       src_samples,
-                       src_samples - src_bilinear_samples);
-               assert(int(src_samples) - int(num_samples_saved) == src_bilinear_samples);
-
-               // Convert to fp16.
-               for (int i = 0; i < src_bilinear_samples; ++i) {
-                       bilinear_weights_fp16_ptr[i * 2 + 0] = fp64_to_fp16(bilinear_weights_ptr[i * 2 + 0]);
-                       bilinear_weights_fp16_ptr[i * 2 + 1] = fp64_to_fp16(bilinear_weights_ptr[i * 2 + 1]);
-               }
-
-               // Normalize so that the sum becomes one. Note that we do it twice;
-               // this sometimes helps a tiny little bit when we have many samples.
-               for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
-                       double sum = 0.0;
-                       for (int i = 0; i < src_bilinear_samples; ++i) {
-                               sum += fp16_to_fp64(bilinear_weights_fp16_ptr[i * 2 + 0]);
-                       }
-                       for (int i = 0; i < src_bilinear_samples; ++i) {
-                               bilinear_weights_fp16_ptr[i * 2 + 0] = fp64_to_fp16(
-                                       fp16_to_fp64(bilinear_weights_fp16_ptr[i * 2 + 0]) / sum);
-                       }
+       // Our tolerance level for the total error is a bit higher than the one for individual
+       // samples, since one would assume overall errors in the shape don't matter as much.
+       if (max_sum_sq_error_fp16 > 2.0f / (255.0f * 255.0f)) {
+               fallback_to_fp32 = true;
+               src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
+               for (unsigned y = 0; y < dst_samples; ++y) {
+                       optimize_sum_sq_error(
+                               weights + y * src_samples, src_samples,
+                               bilinear_weights_fp32 + y * src_bilinear_samples, src_bilinear_samples,
+                               src_size);
                }
        }
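Expressed in 8-bit levels, the two thresholds line up with the comments: the per-sample cutoff in combine_samples() allows an RMS error of about 0.71 of a level (the "sqrt(2) of a level" remark, since two texels share the error), while the fp16-fallback cutoff here allows about 1.41 levels for the whole kernel. A quick standalone check:

```cpp
// Standalone sketch: the two error thresholds expressed in 8-bit levels.
#include <cmath>
#include <cstdio>

int main()
{
	const double per_sample = 0.5 / (255.0 * 255.0);  // combine_samples() threshold.
	const double total      = 2.0 / (255.0 * 255.0);  // fp16-fallback threshold above.
	// RMS amplitudes in units of one 8-bit level (1/255).
	printf("per-sample: %.3f levels, total: %.3f levels\n",
	       std::sqrt(per_sample) * 255.0, std::sqrt(total) * 255.0);
}
```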
 
@@ -446,12 +657,16 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
        check_error();
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
        check_error();
-       glTexImage2D(GL_TEXTURE_2D, 0, GL_RG16F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_HALF_FLOAT, bilinear_weights_fp16);
+       if (fallback_to_fp32) {
+               glTexImage2D(GL_TEXTURE_2D, 0, GL_RG32F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_FLOAT, bilinear_weights_fp32);
+       } else {
+               glTexImage2D(GL_TEXTURE_2D, 0, GL_RG16F, src_bilinear_samples, dst_samples, 0, GL_RG, GL_HALF_FLOAT, bilinear_weights_fp16);
+       }
        check_error();
 
        delete[] weights;
-       delete[] bilinear_weights;
        delete[] bilinear_weights_fp16;
+       delete[] bilinear_weights_fp32;
 }
 
 void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
@@ -484,7 +699,7 @@ void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const strin
        check_error();
 
        set_uniform_int(glsl_program_num, prefix, "sample_tex", *sampler_num);
-       ++sampler_num;
+       ++*sampler_num;
        set_uniform_int(glsl_program_num, prefix, "num_samples", src_bilinear_samples);
        set_uniform_float(glsl_program_num, prefix, "num_loops", num_loops);
        set_uniform_float(glsl_program_num, prefix, "slice_height", slice_height);