X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;ds=sidebyside;f=resample_effect.cpp;h=453a83832f7cb5d905a11b49951ac63a6f14cc12;hb=706365ccee2ad69c5bc3608e12ca8e9ada7ce954;hp=9b6d5f3d4d964168d0a4a0f191e8552b1f54217d;hpb=b1b5194238dd8b357148a3eee48d8d3a1ad04b35;p=movit

diff --git a/resample_effect.cpp b/resample_effect.cpp
index 9b6d5f3..453a838 100644
--- a/resample_effect.cpp
+++ b/resample_effect.cpp
@@ -1,7 +1,7 @@
 // Three-lobed Lanczos, the most common choice.
 // Note that if you change this, the accuracy for LANCZOS_TABLE_SIZE
 // needs to be recomputed.
-#define LANCZOS_RADIUS 3.0
+#define LANCZOS_RADIUS 3.0f

 #include
 #include
@@ -90,7 +90,7 @@ float lanczos_weight_cached(float x)
 		return 0.0f;
 	}
 	float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
-	int table_pos_int = int(table_pos);  // Truncate towards zero.
+	unsigned table_pos_int = int(table_pos);  // Truncate towards zero.
 	float table_pos_frac = table_pos - table_pos_int;
 	assert(table_pos < LANCZOS_TABLE_SIZE + 2);
 	return lanczos_table[table_pos_int] +
@@ -109,7 +109,7 @@ unsigned gcd(unsigned a, unsigned b)
 }

 template<class DestFloat>
-unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved)
+unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved, float pos1_pos2_diff, float inv_pos1_pos2_diff)
 {
 	// Cut off near-zero values at both sides.
 	unsigned num_samples_saved = 0;
@@ -157,7 +157,7 @@ unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_s

 		DestFloat pos, total_weight;
 		float sum_sq_error;
-		combine_two_samples(w1, w2, pos1, pos2, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
+		combine_two_samples(w1, w2, pos1, pos1_pos2_diff, inv_pos1_pos2_diff, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);

 		// If the interpolation error is larger than that of about sqrt(2) of
 		// a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
@@ -206,29 +206,34 @@ void normalize_sum(Tap<T>* vals, unsigned num)
 //
 // The greedy strategy for combining samples is optimal.
 template<class DestFloat>
-unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, Tap<DestFloat> **bilinear_weights)
 {
 	float num_subtexels = src_size / movit_texel_subpixel_precision;
 	float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
+	float pos1_pos2_diff = 1.0f / src_size;
+	float inv_pos1_pos2_diff = src_size;

 	unsigned max_samples_saved = UINT_MAX;
 	for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y) {
-		unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, NULL, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved);
+		unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, NULL, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved, pos1_pos2_diff, inv_pos1_pos2_diff);
 		max_samples_saved = min(max_samples_saved, num_samples_saved);
 	}

 	// Now that we know the right width, actually combine the samples.
 	unsigned src_bilinear_samples = src_samples - max_samples_saved;
-	bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
+	if (*bilinear_weights != NULL) delete[] *bilinear_weights;
+	*bilinear_weights = new Tap<DestFloat>[dst_samples * src_bilinear_samples];
 	for (unsigned y = 0; y < dst_samples; ++y) {
-		Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
+		Tap<DestFloat> *bilinear_weights_ptr = *bilinear_weights + y * src_bilinear_samples;
 		unsigned num_samples_saved = combine_samples(
 			weights + y * src_samples,
 			bilinear_weights_ptr,
 			num_subtexels,
 			inv_num_subtexels,
 			src_samples,
-			max_samples_saved);
+			max_samples_saved,
+			pos1_pos2_diff,
+			inv_pos1_pos2_diff);
 		assert(num_samples_saved == max_samples_saved);
 		normalize_sum(bilinear_weights_ptr, src_bilinear_samples);
 	}
@@ -249,10 +254,10 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
 	// Find the effective range of the bilinear-optimized kernel.
 	// Due to rounding of the positions, this is not necessarily the same
 	// as the intended range (ie., the range of the original weights).
-	int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5));
-	int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
-	lower_pos = min(lower_pos, lrintf(weights[0].pos * size - 0.5));
-	upper_pos = max(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5) + 1);
+	int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5f));
+	int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5f)) + 2;
+	lower_pos = min(lower_pos, lrintf(weights[0].pos * size - 0.5f));
+	upper_pos = max(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5f) + 1);

 	float* effective_weights = new float[upper_pos - lower_pos];
 	for (int i = 0; i < upper_pos - lower_pos; ++i) {
@@ -271,7 +276,7 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
 		assert(x0 < upper_pos - lower_pos);
 		assert(x1 < upper_pos - lower_pos);

-		effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0 - f);
+		effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0f - f);
 		effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
 	}

@@ -296,7 +301,8 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
 }  // namespace

 ResampleEffect::ResampleEffect()
-	: input_width(1280),
+	: owns_effects(true),
+	  input_width(1280),
 	  input_height(720),
 	  offset_x(0.0f), offset_y(0.0f),
 	  zoom_x(1.0f), zoom_y(1.0f),
@@ -314,6 +320,14 @@ ResampleEffect::ResampleEffect()
 	update_size();
 }

+ResampleEffect::~ResampleEffect()
+{
+	if (owns_effects) {
+		delete hpass;
+		delete vpass;
+	}
+}
+
 void ResampleEffect::rewrite_graph(EffectChain *graph, Node *self)
 {
 	Node *hpass_node = graph->add_node(hpass);
@@ -322,6 +336,7 @@ void ResampleEffect::rewrite_graph(EffectChain *graph, Node *self)
 	graph->replace_receiver(self, hpass_node);
 	graph->replace_sender(self, vpass_node);
 	self->disabled = true;
+	owns_effects = false;
 }

 // We get this information forwarded from the first blur pass,
@@ -522,15 +537,15 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
 	GLenum type, internal_format;
 	void *pixels;

-	assert((weights.bilinear_weights_fp16 == nullptr) != (weights.bilinear_weights_fp32 == nullptr));
-	if (weights.bilinear_weights_fp32 != nullptr) {
+	assert((weights.bilinear_weights_fp16 == NULL) != (weights.bilinear_weights_fp32 == NULL));
+	if (weights.bilinear_weights_fp32 != NULL) {
 		type = GL_FLOAT;
 		internal_format = GL_RG32F;
-		pixels = weights.bilinear_weights_fp32.get();
+		pixels = weights.bilinear_weights_fp32;
 	} else {
 		type = GL_HALF_FLOAT;
 		internal_format = GL_RG16F;
-		pixels = weights.bilinear_weights_fp16.get();
+		pixels = weights.bilinear_weights_fp16;
 	}

 	if (int(weights.src_bilinear_samples) == last_texture_width &&
@@ -546,6 +561,9 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
 		last_texture_internal_format = internal_format;
 	}
 	check_error();
+
+	delete[] weights.bilinear_weights_fp16;
+	delete[] weights.bilinear_weights_fp32;
 }

 ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
@@ -628,7 +646,7 @@ ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, f
 	float radius_scaling_factor = min(scaling_factor, 1.0f);
 	int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
 	int src_samples = int_radius * 2 + 1;
-	unique_ptr<Tap<float>[]> weights(new Tap<float>[dst_samples * src_samples]);
+	Tap<float> *weights = new Tap<float>[dst_samples * src_samples];
 	float subpixel_offset = offset - lrintf(offset);  // The part not covered by whole_pixel_offset.
 	assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
 	for (unsigned y = 0; y < dst_samples; ++y) {
@@ -638,11 +656,12 @@ ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, f
 		int base_src_y = lrintf(center_src_y);

 		// Now sample pixels on each side around that point.
+		float inv_src_size = 1.0 / float(src_size);
 		for (int i = 0; i < src_samples; ++i) {
 			int src_y = base_src_y + i - int_radius;
 			float weight = lanczos_weight_cached(radius_scaling_factor * (src_y - center_src_y - subpixel_offset));
 			weights[y * src_samples + i].weight = weight * radius_scaling_factor;
-			weights[y * src_samples + i].pos = (src_y + 0.5) / float(src_size);
+			weights[y * src_samples + i].pos = (src_y + 0.5f) * inv_src_size;
 		}
 	}

@@ -651,14 +670,14 @@ ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, f
 	// Our tolerance level for total error is a bit higher than the one for invididual
 	// samples, since one would assume overall errors in the shape don't matter as much.
 	const float max_error = 2.0f / (255.0f * 255.0f);
-	unique_ptr<Tap<fp16_int_t>[]> bilinear_weights_fp16;
-	int src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, dst_samples, &bilinear_weights_fp16);
-	unique_ptr<Tap<float>[]> bilinear_weights_fp32 = NULL;
+	Tap<fp16_int_t> *bilinear_weights_fp16 = NULL;
+	int src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
+	Tap<float> *bilinear_weights_fp32 = NULL;
 	double max_sum_sq_error_fp16 = 0.0;
 	for (unsigned y = 0; y < dst_samples; ++y) {
 		double sum_sq_error_fp16 = compute_sum_sq_error(
-			weights.get() + y * src_samples, src_samples,
-			bilinear_weights_fp16.get() + y * src_bilinear_samples, src_bilinear_samples,
+			weights + y * src_samples, src_samples,
+			bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
 			src_size);
 		max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
 		if (max_sum_sq_error_fp16 > max_error) {
@@ -667,16 +686,19 @@ ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, f
 	}

 	if (max_sum_sq_error_fp16 > max_error) {
-		bilinear_weights_fp16.reset();
-		src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, dst_samples, &bilinear_weights_fp32);
+		delete[] bilinear_weights_fp16;
+		bilinear_weights_fp16 = NULL;
+		src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
 	}

+	delete[] weights;
+
 	ScalingWeights ret;
 	ret.src_bilinear_samples = src_bilinear_samples;
 	ret.dst_samples = dst_samples;
 	ret.num_loops = num_loops;
-	ret.bilinear_weights_fp16 = move(bilinear_weights_fp16);
-	ret.bilinear_weights_fp32 = move(bilinear_weights_fp32);
+	ret.bilinear_weights_fp16 = bilinear_weights_fp16;
+	ret.bilinear_weights_fp32 = bilinear_weights_fp32;
 	return ret;
 }
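
The two parameters threaded through combine_samples() and combine_many_samples() above, pos1_pos2_diff and inv_pos1_pos2_diff, rely on the taps produced by calculate_scaling_weights() being equally spaced: the distance between two neighboring taps is always exactly 1.0f / src_size, so the spacing and its reciprocal can be computed once per kernel in combine_many_samples() instead of being rederived, with a division, from pos1 and pos2 for every candidate pair that combine_samples() tries to merge. The following is a minimal, self-contained sketch of the kind of bilinear tap merging these values feed into; Tap, combine_pair() and the 1/64-texel subtexel precision are illustrative stand-ins (assumptions not shown in this diff) for movit's real Tap<T>, combine_two_samples() from util.h and movit_texel_subpixel_precision.

#include <math.h>
#include <stdio.h>

struct Tap {
	float weight;
	float pos;  // Normalized texture coordinate of the tap.
};

// Merge two neighboring, equally spaced taps into one bilinear tap.
// pos1_pos2_diff is the constant tap spacing (pos2 - pos1, i.e. one texel);
// inv_pos1_pos2_diff is its reciprocal, precomputed once per kernel so that
// this per-pair code contains no division.
static Tap combine_pair(Tap t1, Tap t2,
                        float pos1_pos2_diff, float inv_pos1_pos2_diff,
                        float num_subtexels, float inv_num_subtexels)
{
	float total_weight = t1.weight + t2.weight;
	// Fraction of the way from t1.pos towards t2.pos where the combined
	// lookup should land so that bilinear filtering reproduces both weights.
	float z = (fabs(total_weight) < 1e-6f) ? 0.5f : t2.weight / total_weight;

	// The GPU quantizes the fractional texel coordinate, so snap the
	// normalized offset from t1.pos to the subtexel grid, again using the
	// precomputed reciprocals instead of dividing.
	float offset = z * pos1_pos2_diff;                            // Normalized offset.
	offset = lrintf(offset * num_subtexels) * inv_num_subtexels;  // Snap to the grid.
	z = offset * inv_pos1_pos2_diff;                              // Back to a fraction of the spacing.

	Tap combined;
	combined.weight = total_weight;
	combined.pos = t1.pos + z * pos1_pos2_diff;
	return combined;
}

int main()
{
	const unsigned src_size = 1280;

	// The two values hoisted out of the per-pair work in this change.
	const float pos1_pos2_diff = 1.0f / src_size;
	const float inv_pos1_pos2_diff = float(src_size);

	// Assumed 1/64-texel subtexel precision; movit probes the real value at
	// init time (movit_texel_subpixel_precision). The two derived constants
	// mirror the ones computed in combine_many_samples() above.
	const float texel_subpixel_precision = 1.0f / 64.0f;
	const float num_subtexels = src_size / texel_subpixel_precision;
	const float inv_num_subtexels = texel_subpixel_precision / src_size;

	Tap t1 = { 0.25f, 100.5f / src_size };
	Tap t2 = { 0.75f, 101.5f / src_size };
	Tap c = combine_pair(t1, t2, pos1_pos2_diff, inv_pos1_pos2_diff,
	                     num_subtexels, inv_num_subtexels);
	printf("combined: weight=%.2f pos=%.6f\n", c.weight, c.pos);
	return 0;
}

The same reciprocal trick appears once more in the diff: the inner loop of calculate_scaling_weights() now multiplies by a precomputed inv_src_size instead of dividing by float(src_size) for every tap.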
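
The owns_effects flag added to ResampleEffect covers a transfer of ownership: the effect initially owns its hpass and vpass sub-effects, but once rewrite_graph() has inserted them into the EffectChain, the chain is expected to delete them, so the new destructor frees them only when that hand-off never happened. Below is a small stand-alone sketch of that pattern; Effect, Chain and Wrapper are hypothetical stand-ins for movit's Effect, EffectChain and ResampleEffect, an illustration of the ownership rule rather than movit code.

#include <stdio.h>

struct Effect {
	virtual ~Effect() { printf("effect deleted\n"); }
};

// Stand-in for EffectChain: once an effect has been added, the chain deletes it.
class Chain {
public:
	Chain() : num_nodes(0) {}
	~Chain() {
		for (int i = 0; i < num_nodes; ++i) {
			delete nodes[i];
		}
	}
	void add_node(Effect *effect) { nodes[num_nodes++] = effect; }  // No bounds checking; sketch only.

private:
	Effect *nodes[16];
	int num_nodes;
};

// Stand-in for ResampleEffect, which creates its two sub-effects itself.
class Wrapper {
public:
	Wrapper() : owns_effects(true), hpass(new Effect), vpass(new Effect) {}
	~Wrapper() {
		// Mirrors ResampleEffect::~ResampleEffect(): only free what was
		// never handed over to the chain.
		if (owns_effects) {
			delete hpass;
			delete vpass;
		}
	}
	void rewrite_graph(Chain *chain) {
		chain->add_node(hpass);
		chain->add_node(vpass);
		owns_effects = false;  // The chain is responsible for deletion from now on.
	}

private:
	bool owns_effects;
	Effect *hpass, *vpass;
};

int main()
{
	{
		Wrapper never_added;  // Destructor frees both sub-effects here.
	}
	Chain chain;
	Wrapper *added = new Wrapper;
	added->rewrite_graph(&chain);
	delete added;  // Frees nothing; the chain now owns both effects.
	return 0;      // chain's destructor frees them.
}

The raw arrays in ScalingWeights follow the same manual-ownership convention after this change: calculate_scaling_weights() hands back plain new[]-allocated bilinear_weights_fp16/fp32 buffers, and SingleResamplePassEffect::update_texture() delete[]s both once the weights texture has been uploaded.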