Remove some unneeded conversions from ResampleEffect. Speeds up texture generation...
diff --git a/resample_effect.cpp b/resample_effect.cpp
index 958ed25b9c035a9ca1ad59109294b9cc96ed97ff..ba8a71c0241443c0730a5ce99d68a2fa7c014ad3 100644
--- a/resample_effect.cpp
+++ b/resample_effect.cpp
@@ -1,7 +1,7 @@
 // Three-lobed Lanczos, the most common choice.
 // Note that if you change this, the accuracy for LANCZOS_TABLE_SIZE
 // needs to be recomputed.
-#define LANCZOS_RADIUS 3.0
+#define LANCZOS_RADIUS 3.0f
 
 #include <epoxy/gl.h>
 #include <assert.h>
@@ -27,12 +27,6 @@ namespace movit {
 
 namespace {
 
-template<class T>
-struct Tap {
-       T weight;
-       T pos;
-};
-
 float sinc(float x)
 {
        if (fabs(x) < 1e-6) {
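
The small-|x| special case exists because sin(x)/x is numerically a 0/0 at the origin even though the limit is perfectly well defined:

    \operatorname{sinc}(x) = \frac{\sin x}{x} = 1 - \frac{x^2}{6} + \frac{x^4}{120} - \cdots, \qquad \lim_{x \to 0} \operatorname{sinc}(x) = 1

For |x| < 1e-6 the true value differs from 1 by less than about 1.7e-13, far below float precision, so short-circuiting the division loses nothing.
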
@@ -74,6 +68,9 @@ float lanczos_weight(float x)
 //
 // Solve for e = 1e-6 yields a step size of 0.0027, which to cover the range
 // 0..3 needs 1109 steps. We round up to the next power of two, just to be sure.
+//
+// You need to call lanczos_table_init_done before the first call to
+// lanczos_weight_cached.
 #define LANCZOS_TABLE_SIZE 2048
 bool lanczos_table_init_done = false;
 float lanczos_table[LANCZOS_TABLE_SIZE + 2];
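
For reference, the step-size figure quoted above is what the standard piecewise-linear interpolation error bound gives (a back-of-the-envelope reconstruction; the constant used for the kernel's second derivative is not shown in this hunk):

    |f(x) - \hat f(x)| \le \frac{h^2}{8} \max_{[0,3]} |f''|, \qquad h \le \sqrt{\frac{8e}{\max |f''|}}

With e = 1e-6 this lands at a step of a few thousandths, matching the 0.0027 / 1109-step estimate, and 1109 is then rounded up to the next power of two, 2048.
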
@@ -88,17 +85,12 @@ void init_lanczos_table()
 
 float lanczos_weight_cached(float x)
 {
-       if (!lanczos_table_init_done) {
-               // Could in theory race between two threads if we are unlucky,
-               // but that is harmless, since they'll write the same data.
-               init_lanczos_table();
-       }
        x = fabs(x);
        if (x > LANCZOS_RADIUS) {
                return 0.0f;
        }
        float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
-       int table_pos_int = int(table_pos);  // Truncate towards zero.
+       unsigned table_pos_int = int(table_pos);  // Truncate towards zero.
        float table_pos_frac = table_pos - table_pos_int;
        assert(table_pos < LANCZOS_TABLE_SIZE + 2);
        return lanczos_table[table_pos_int] +
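
The hunk cuts off in the middle of the return statement, but the pieces computed above (integer index, fractional part, and extra table entries past LANCZOS_TABLE_SIZE) point to a plain linear interpolation between neighbouring entries. A minimal sketch of how such a cached lookup typically completes, not a verbatim copy of the rest of lanczos_weight_cached():

    // Interpolate linearly between two adjacent table entries.
    float lerp_table_lookup(const float *table, float table_pos)
    {
            unsigned idx = unsigned(table_pos);    // truncate towards zero
            float frac = table_pos - idx;          // fractional part in [0, 1)
            return table[idx] + frac * (table[idx + 1] - table[idx]);
    }
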
@@ -194,13 +186,13 @@ template<class T>
 void normalize_sum(Tap<T>* vals, unsigned num)
 {
        for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
-               double sum = 0.0;
+               float sum = 0.0;
                for (unsigned i = 0; i < num; ++i) {
-                       sum += to_fp64(vals[i].weight);
+                       sum += to_fp32(vals[i].weight);
                }
-               double inv_sum = 1.0 / sum;
+               float inv_sum = 1.0 / sum;
                for (unsigned i = 0; i < num; ++i) {
-                       vals[i].weight = from_fp64<T>(to_fp64(vals[i].weight) * inv_sum);
+                       vals[i].weight = from_fp32<T>(to_fp32(vals[i].weight) * inv_sum);
                }
        }
 }
@@ -214,7 +206,7 @@ void normalize_sum(Tap<T>* vals, unsigned num)
 //
 // The greedy strategy for combining samples is optimal.
 template<class DestFloat>
-unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, Tap<DestFloat> **bilinear_weights)
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
 {
        float num_subtexels = src_size / movit_texel_subpixel_precision;
        float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
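
combine_many_samples builds on the usual GPU trick of merging two adjacent taps into a single bilinear fetch: sampling between two texels with the right fractional offset makes one texture lookup return the weighted sum of both. A minimal sketch of that merge (illustrative names; movit's own combining code additionally snaps the offset to the hardware's subtexel precision, cf. num_subtexels above, and picks which pairs to merge greedily as noted):

    // Merge taps (w1 at texel x) and (w2 at texel x + 1) into one bilinear tap.
    // Bilinear filtering at fractional offset f returns (1 - f) * a + f * b,
    // so sampling at x + w2 / (w1 + w2) with weight w1 + w2 yields w1 * a + w2 * b.
    struct CombinedTap { float weight, offset; };

    CombinedTap combine_two(float w1, float w2)
    {
            CombinedTap t;
            t.weight = w1 + w2;
            t.offset = w2 / (w1 + w2);  // undefined if w1 + w2 == 0; sketch only
            return t;
    }
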
@@ -227,9 +219,9 @@ unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsi
 
        // Now that we know the right width, actually combine the samples.
        unsigned src_bilinear_samples = src_samples - max_samples_saved;
-       *bilinear_weights = new Tap<DestFloat>[dst_samples * src_bilinear_samples];
+       bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
        for (unsigned y = 0; y < dst_samples; ++y) {
-               Tap<DestFloat> *bilinear_weights_ptr = *bilinear_weights + y * src_bilinear_samples;
+               Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
                unsigned num_samples_saved = combine_samples(
                        weights + y * src_samples,
                        bilinear_weights_ptr,
@@ -257,10 +249,10 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
        // Find the effective range of the bilinear-optimized kernel.
        // Due to rounding of the positions, this is not necessarily the same
        // as the intended range (i.e., the range of the original weights).
-       int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
-       int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
-       lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
-       upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5) + 1);
+       int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5f));
+       int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5f)) + 2;
+       lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5f));
+       upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5f) + 1);
 
        float* effective_weights = new float[upper_pos - lower_pos];
        for (int i = 0; i < upper_pos - lower_pos; ++i) {
@@ -269,7 +261,7 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
 
        // Now find the effective weights that result from this sampling.
        for (unsigned i = 0; i < num_bilinear_weights; ++i) {
-               const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+               const float pixel_pos = to_fp32(bilinear_weights[i].pos) * size - 0.5f;
                const int x0 = int(floor(pixel_pos)) - lower_pos;
                const int x1 = x0 + 1;
                const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
@@ -279,8 +271,8 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
                assert(x0 < upper_pos - lower_pos);
                assert(x1 < upper_pos - lower_pos);
 
-               effective_weights[x0] += to_fp64(bilinear_weights[i].weight) * (1.0 - f);
-               effective_weights[x1] += to_fp64(bilinear_weights[i].weight) * f;
+               effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0f - f);
+               effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
        }
 
        // Subtract the desired weights to get the error.
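
In other words, compute_sum_sq_error reconstructs the weights that the bilinear-optimized taps effectively apply to each source texel and sums the squared deviation from the ideal kernel:

    E = \sum_x \bigl( w_{\text{effective}}(x) - w_{\text{ideal}}(x) \bigr)^2

where each bilinear tap at fractional position f contributes (1 - f) * weight to texel x0 and f * weight to texel x1 = x0 + 1, exactly as in the accumulation loop above.
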
@@ -461,6 +453,12 @@ SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
        register_uniform_float("whole_pixel_offset", &uniform_whole_pixel_offset);
 
        glGenTextures(1, &texnum);
+
+       if (!lanczos_table_init_done) {
+               // Could in theory race between two threads if we are unlucky,
+               // but that is harmless, since they'll write the same data.
+               init_lanczos_table();
+       }
 }
 
 SingleResamplePassEffect::~SingleResamplePassEffect()
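
The constructor-time initialization accepts a benign write-write race, as the comment notes (all threads write identical data). If one ever wanted to silence tools such as ThreadSanitizer, the textbook alternative would be a std::call_once guard; a sketch of that option, not what movit does:

    #include <mutex>

    static std::once_flag lanczos_table_once;

    void ensure_lanczos_table()
    {
            std::call_once(lanczos_table_once, init_lanczos_table);
    }
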
@@ -502,12 +500,68 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
                assert(false);
        }
 
+       ScalingWeights weights = calculate_scaling_weights(src_size, dst_size, zoom, offset);
+       src_bilinear_samples = weights.src_bilinear_samples;
+       num_loops = weights.num_loops;
+       slice_height = 1.0f / weights.num_loops;
+
+       // Encode as a two-component texture. Note the GL_REPEAT.
+       glActiveTexture(GL_TEXTURE0 + *sampler_num);
+       check_error();
+       glBindTexture(GL_TEXTURE_2D, texnum);
+       check_error();
+       if (last_texture_width == -1) {
+               // Need to set this state the first time.
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+               check_error();
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+               check_error();
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+               check_error();
+       }
+
+       GLenum type, internal_format;
+       void *pixels;
+       assert((weights.bilinear_weights_fp16 == nullptr) != (weights.bilinear_weights_fp32 == nullptr));
+       if (weights.bilinear_weights_fp32 != nullptr) {
+               type = GL_FLOAT;
+               internal_format = GL_RG32F;
+               pixels = weights.bilinear_weights_fp32.get();
+       } else {
+               type = GL_HALF_FLOAT;
+               internal_format = GL_RG16F;
+               pixels = weights.bilinear_weights_fp16.get();
+       }
+
+       if (int(weights.src_bilinear_samples) == last_texture_width &&
+           int(weights.dst_samples) == last_texture_height &&
+           internal_format == last_texture_internal_format) {
+               // Texture dimensions and type are unchanged; it is more efficient
+               // to just update it rather than making an entirely new texture.
+               glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, weights.src_bilinear_samples, weights.dst_samples, GL_RG, type, pixels);
+       } else {
+               glTexImage2D(GL_TEXTURE_2D, 0, internal_format, weights.src_bilinear_samples, weights.dst_samples, 0, GL_RG, type, pixels);
+               last_texture_width = weights.src_bilinear_samples;
+               last_texture_height = weights.dst_samples;
+               last_texture_internal_format = internal_format;
+       }
+       check_error();
+}
+
+ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
+{
+       if (!lanczos_table_init_done) {
+               // Only needed if run from outside ResampleEffect.
+               init_lanczos_table();
+       }
+
        // For many resamplings (e.g. 640 -> 1280), we will end up with the same
        // set of samples over and over again in a loop. Thus, we can compute only
        // the first such loop, and then ask the card to repeat the texture for us.
        // This is both easier on the texture cache and lowers our CPU cost for
        // generating the kernel somewhat.
        float scaling_factor;
+       int num_loops;
        if (fabs(zoom - 1.0f) < 1e-6) {
                num_loops = gcd(src_size, dst_size);
                scaling_factor = float(dst_size) / float(src_size);
@@ -520,7 +574,6 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
                num_loops = 1;
                scaling_factor = zoom * float(dst_size) / float(src_size);
        }
-       slice_height = 1.0f / num_loops;
        unsigned dst_samples = dst_size / num_loops;
 
        // Sample the kernel in the right place. A diagram with a triangular kernel
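
A quick worked example of the loop trick above, for a 640 -> 1280 upscale with zoom == 1:

    // num_loops      = gcd(640, 1280) = 640
    // scaling_factor = 1280.0f / 640.0f = 2.0f
    // dst_samples    = 1280 / 640 = 2      // rows of weights actually computed
    // slice_height   = 1.0f / 640          // set in update_texture above

Only two rows of the weight texture are generated; GL_REPEAT replays them 640 times down the output.
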
@@ -575,7 +628,7 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
        float radius_scaling_factor = min(scaling_factor, 1.0f);
        int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
        int src_samples = int_radius * 2 + 1;
-       Tap<float> *weights = new Tap<float>[dst_samples * src_samples];
+       unique_ptr<Tap<float>[]> weights(new Tap<float>[dst_samples * src_samples]);
        float subpixel_offset = offset - lrintf(offset);  // The part not covered by whole_pixel_offset.
        assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
        for (unsigned y = 0; y < dst_samples; ++y) {
@@ -585,11 +638,12 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
                int base_src_y = lrintf(center_src_y);
 
                // Now sample <int_radius> pixels on each side around that point.
+               float inv_src_size = 1.0 / float(src_size);
                for (int i = 0; i < src_samples; ++i) {
                        int src_y = base_src_y + i - int_radius;
                        float weight = lanczos_weight_cached(radius_scaling_factor * (src_y - center_src_y - subpixel_offset));
                        weights[y * src_samples + i].weight = weight * radius_scaling_factor;
-                       weights[y * src_samples + i].pos = (src_y + 0.5) / float(src_size);
+                       weights[y * src_samples + i].pos = (src_y + 0.5f) * inv_src_size;
                }
        }
 
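
The stored pos is a normalized texture coordinate aimed at the texel center, hence the half-texel term: in OpenGL's convention texel i of an N-texel dimension spans [i/N, (i+1)/N), so its center sits at

    \text{pos}(i) = \frac{i + 0.5}{N}, \qquad N = \text{src\_size}
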
@@ -598,15 +652,14 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
        // Our tolerance level for total error is a bit higher than the one for individual
        // samples, since one would assume overall errors in the shape don't matter as much.
        const float max_error = 2.0f / (255.0f * 255.0f);
-       Tap<fp16_int_t> *bilinear_weights_fp16;
-       src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
-       Tap<float> *bilinear_weights_fp32 = NULL;
-       bool fallback_to_fp32 = false;
+       unique_ptr<Tap<fp16_int_t>[]> bilinear_weights_fp16;
+       int src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, dst_samples, &bilinear_weights_fp16);
+       unique_ptr<Tap<float>[]> bilinear_weights_fp32 = NULL;
        double max_sum_sq_error_fp16 = 0.0;
        for (unsigned y = 0; y < dst_samples; ++y) {
                double sum_sq_error_fp16 = compute_sum_sq_error(
-                       weights + y * src_samples, src_samples,
-                       bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
+                       weights.get() + y * src_samples, src_samples,
+                       bilinear_weights_fp16.get() + y * src_bilinear_samples, src_bilinear_samples,
                        src_size);
                max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
                if (max_sum_sq_error_fp16 > max_error) {
@@ -615,54 +668,17 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
        }
 
        if (max_sum_sq_error_fp16 > max_error) {
-               fallback_to_fp32 = true;
-               src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
-       }
-
-       // Encode as a two-component texture. Note the GL_REPEAT.
-       glActiveTexture(GL_TEXTURE0 + *sampler_num);
-       check_error();
-       glBindTexture(GL_TEXTURE_2D, texnum);
-       check_error();
-       if (last_texture_width == -1) {
-               // Need to set this state the first time.
-               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
-               check_error();
-               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
-               check_error();
-               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
-               check_error();
+               bilinear_weights_fp16.reset();
+               src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, dst_samples, &bilinear_weights_fp32);
        }
 
-       GLenum type, internal_format;
-       void *pixels;
-       if (fallback_to_fp32) {
-               type = GL_FLOAT;
-               internal_format = GL_RG32F;
-               pixels = bilinear_weights_fp32;
-       } else {
-               type = GL_HALF_FLOAT;
-               internal_format = GL_RG16F;
-               pixels = bilinear_weights_fp16;
-       }
-
-       if (int(src_bilinear_samples) == last_texture_width &&
-           int(dst_samples) == last_texture_height &&
-           internal_format == last_texture_internal_format) {
-               // Texture dimensions and type are unchanged; it is more efficient
-               // to just update it rather than making an entirely new texture.
-               glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, src_bilinear_samples, dst_samples, GL_RG, type, pixels);
-       } else {
-               glTexImage2D(GL_TEXTURE_2D, 0, internal_format, src_bilinear_samples, dst_samples, 0, GL_RG, type, pixels);
-               last_texture_width = src_bilinear_samples;
-               last_texture_height = dst_samples;
-               last_texture_internal_format = internal_format;
-       }
-       check_error();
-
-       delete[] weights;
-       delete[] bilinear_weights_fp16;
-       delete[] bilinear_weights_fp32;
+       ScalingWeights ret;
+       ret.src_bilinear_samples = src_bilinear_samples;
+       ret.dst_samples = dst_samples;
+       ret.num_loops = num_loops;
+       ret.bilinear_weights_fp16 = move(bilinear_weights_fp16);
+       ret.bilinear_weights_fp32 = move(bilinear_weights_fp32);
+       return ret;
 }
 
 void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
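
With the kernel computation moved into a free function, it can be exercised without touching GL state at all (the lanczos_table_init_done check at the top of the function exists for exactly that case). A rough usage sketch, assuming ScalingWeights is declared in the accompanying header with the fields assigned above:

    // Weights for a 1280 -> 720 downscale, no zoom, no extra offset.
    ScalingWeights w = calculate_scaling_weights(1280, 720, 1.0f, 0.0f);

    // Exactly one of the two arrays is non-null, depending on whether fp16
    // passed the error check (see the assert in update_texture above).
    if (w.bilinear_weights_fp32 != nullptr) {
            // w.dst_samples rows of w.src_bilinear_samples Tap<float> entries
    } else {
            // w.dst_samples rows of w.src_bilinear_samples Tap<fp16_int_t> entries
    }
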
@@ -713,10 +729,12 @@ void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const strin
        // We specifically do not want mipmaps on the input texture;
        // they break minification.
        Node *self = chain->find_node_for_effect(this);
-       glActiveTexture(chain->get_input_sampler(self, 0));
-       check_error();
-       glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
-       check_error();
+       if (chain->has_input_sampler(self, 0)) {
+               glActiveTexture(chain->get_input_sampler(self, 0));
+               check_error();
+               glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+               check_error();
+       }
 }
 
 }  // namespace movit