X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=resample_effect.cpp;h=74508646091872aec75b020b7dfa60d0639ae125;hb=refs%2Fheads%2Fcompute_resample;hp=79d5f21c45543a936f81e33a4ead19ca32d5a74f;hpb=1bd97eb70a6fcb913bf954e369bc1a90ba17f74e;p=movit

diff --git a/resample_effect.cpp b/resample_effect.cpp
index 79d5f21..7450864 100644
--- a/resample_effect.cpp
+++ b/resample_effect.cpp
@@ -1,5 +1,7 @@
 // Three-lobed Lanczos, the most common choice.
-#define LANCZOS_RADIUS 3.0
+// Note that if you change this, the accuracy for LANCZOS_TABLE_SIZE
+// needs to be recomputed.
+#define LANCZOS_RADIUS 3.0f
 
 #include
 #include
@@ -7,6 +9,7 @@
 #include
 #include
 #include
+#include <mutex>
 #include
 #include
 #include
@@ -25,12 +28,6 @@ namespace movit {
 
 namespace {
 
-template<class T>
-struct Tap {
-    T weight;
-    T pos;
-};
-
 float sinc(float x)
 {
     if (fabs(x) < 1e-6) {
@@ -40,15 +37,66 @@ float sinc(float x)
     }
 }
 
-float lanczos_weight(float x, float a)
+float lanczos_weight(float x)
 {
-    if (fabs(x) > a) {
+    if (fabs(x) > LANCZOS_RADIUS) {
         return 0.0f;
     } else {
-        return sinc(M_PI * x) * sinc(M_PI * x / a);
+        return sinc(M_PI * x) * sinc((M_PI / LANCZOS_RADIUS) * x);
+    }
+}
+
+// The weight function can be expensive to compute over and over again
+// (which will happen during e.g. a zoom), but it is also easy to interpolate
+// linearly. We compute the right half of the function (in the range of
+// 0..LANCZOS_RADIUS), with two guard elements for easier interpolation, and
+// linearly interpolate to get our function.
+//
+// We want to scale the table so that the maximum error is always smaller
+// than 1e-6. As per http://www-solar.mcs.st-andrews.ac.uk/~clare/Lectures/num-analysis/Numan_chap3.pdf,
+// the error for interpolating a function linearly between points [a,b] is
+//
+//   e = 1/2 (x-a)(x-b) f''(u_x)
+//
+// for some point u_x in [a,b] (where f(x) is our Lanczos function; we're
+// assuming LANCZOS_RADIUS=3 from here on). Obviously this is bounded by
+// the maximum of |f''(x)| over the entire range. Numeric optimization shows
+// that maximum to be at x=1.09369819474562880, with the value 2.40067758733152381.
+// So if the step between consecutive values is called d, we get
+//
+//   |e| <= 1/2 (d/2)^2 2.4007
+//   |e| <= 0.3001 d^2
+//
+// Solving for e = 1e-6 yields a step size of 0.0018, which to cover the range
+// 0..3 needs about 1644 steps. We round up to the next power of two, just to be sure.
+//
+// You need to run init_lanczos_table() (through call_once on
+// lanczos_table_init_done) before the first call to lanczos_weight_cached.
+#define LANCZOS_TABLE_SIZE 2048
+static once_flag lanczos_table_init_done;
+float lanczos_table[LANCZOS_TABLE_SIZE + 2];
+
+void init_lanczos_table()
+{
+    for (unsigned i = 0; i < LANCZOS_TABLE_SIZE + 2; ++i) {
+        lanczos_table[i] = lanczos_weight(float(i) * (LANCZOS_RADIUS / LANCZOS_TABLE_SIZE));
     }
 }
 
+float lanczos_weight_cached(float x)
+{
+    x = fabs(x);
+    if (x > LANCZOS_RADIUS) {
+        return 0.0f;
+    }
+    float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
+    unsigned table_pos_int = int(table_pos);  // Truncate towards zero.
+    float table_pos_frac = table_pos - table_pos_int;
+    assert(table_pos < LANCZOS_TABLE_SIZE + 2);
+    return lanczos_table[table_pos_int] +
+        table_pos_frac * (lanczos_table[table_pos_int + 1] - lanczos_table[table_pos_int]);
+}
+
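To make the error budget above concrete, here is a self-contained sketch of the same table-plus-lerp idea, separate from the patch (all names are invented for illustration). It spot-checks the interpolated lookup against direct evaluation:

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>

namespace {

const float kPi = 3.14159265358979f;
const float kRadius = 3.0f;   // Mirrors LANCZOS_RADIUS.
const int kTableSize = 2048;  // Mirrors LANCZOS_TABLE_SIZE.
float table[kTableSize + 2];  // Two guard entries, for the lerp below.

float sinc(float x) { return (std::fabs(x) < 1e-6f) ? 1.0f : std::sin(x) / x; }

// The function we want to avoid evaluating once per tap.
float slow_weight(float x)
{
    if (std::fabs(x) > kRadius) return 0.0f;
    return sinc(kPi * x) * sinc((kPi / kRadius) * x);
}

void init_table()
{
    for (int i = 0; i < kTableSize + 2; ++i)
        table[i] = slow_weight(float(i) * (kRadius / kTableSize));
}

float cached_weight(float x)
{
    x = std::fabs(x);
    if (x > kRadius) return 0.0f;
    float pos = x * (kTableSize / kRadius);
    int ipos = int(pos);  // Truncate towards zero.
    float frac = pos - ipos;
    return table[ipos] + frac * (table[ipos + 1] - table[ipos]);
}

}  // namespace

int main()
{
    init_table();
    float max_err = 0.0f;
    for (float x = 0.0f; x < kRadius; x += 1e-3f)
        max_err = std::max(max_err, std::fabs(cached_weight(x) - slow_weight(x)));
    std::printf("max lerp error: %g\n", max_err);  // Lands around the 1e-6 budget.
    assert(max_err < 1e-5f);
    return 0;
}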
 // Euclid's algorithm, from Wikipedia.
 unsigned gcd(unsigned a, unsigned b)
 {
@@ -61,7 +109,7 @@ unsigned gcd(unsigned a, unsigned b)
 }
 
 template<class DestFloat>
-unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved)
+unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved, float pos1_pos2_diff, float inv_pos1_pos2_diff)
 {
     // Cut off near-zero values at both sides.
     unsigned num_samples_saved = 0;
@@ -81,7 +129,7 @@ unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_s
 
     for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
         // Copy the sample directly; it will be overwritten later if we can combine.
-        if (dst != NULL) {
+        if (dst != nullptr) {
             dst[j].weight = convert_float(src[i].weight);
             dst[j].pos = convert_float(src[i].pos);
         }
@@ -107,9 +155,9 @@ unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_s
         float pos2 = src[i + 1].pos;
         assert(pos2 > pos1);
 
-        fp16_int_t pos, total_weight;
+        DestFloat pos, total_weight;
         float sum_sq_error;
-        combine_two_samples(w1, w2, pos1, pos2, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
+        combine_two_samples(w1, w2, pos1, pos1_pos2_diff, inv_pos1_pos2_diff, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
 
         // If the interpolation error is larger than that of about sqrt(2) of
         // a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
@@ -121,7 +169,7 @@ unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_s
         }
 
         // OK, we can combine this and the next sample.
-        if (dst != NULL) {
+        if (dst != nullptr) {
             dst[j].weight = total_weight;
             dst[j].pos = pos;
         }
@@ -138,12 +186,28 @@ template<class T>
 void normalize_sum(Tap<T>* vals, unsigned num)
 {
     for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
-        double sum = 0.0;
+        float sum = 0.0;
+        for (unsigned i = 0; i < num; ++i) {
+            sum += to_fp32(vals[i].weight);
+        }
+        float inv_sum = 1.0 / sum;
+        for (unsigned i = 0; i < num; ++i) {
+            vals[i].weight = from_fp32(to_fp32(vals[i].weight) * inv_sum);
+        }
+    }
+}
+
+template<class T>
+void normalize_sum(T* vals, unsigned num)
+{
+    for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
+        float sum = 0.0;
         for (unsigned i = 0; i < num; ++i) {
-            sum += to_fp64(vals[i].weight);
+            sum += to_fp32(vals[i]);
         }
+        float inv_sum = 1.0 / sum;
         for (unsigned i = 0; i < num; ++i) {
-            vals[i].weight = from_fp64(to_fp64(vals[i].weight) / sum);
+            vals[i] = from_fp32(to_fp32(vals[i]) * inv_sum);
         }
     }
 }
@@ -157,29 +221,34 @@ void normalize_sum(Tap<T>* vals, unsigned num)
 //
 // The greedy strategy for combining samples is optimal.
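The combining step leans on combine_two_samples(), which is defined elsewhere in movit and not shown in this diff. The idea in miniature, as a standalone sketch that leaves out the subtexel rounding and error bookkeeping the real function does:

#include <cstdio>

struct TapF { float weight, pos; };

// Collapse two taps at adjacent texels (b.pos == a.pos + one texel) into a
// single tap placed between the two texel centers: the GPU's bilinear filter
// then mixes the texels in the ratio a.weight : b.weight, so one texture
// fetch does the work of two. (Assumes same-sign weights; the real
// combine_two_samples also snaps pos to the subtexel grid and reports the
// resulting squared error so the caller can refuse bad combinations.)
TapF combine(TapF a, TapF b)
{
    TapF c;
    c.weight = a.weight + b.weight;
    c.pos = a.pos + (b.weight / c.weight) * (b.pos - a.pos);
    return c;
}

int main()
{
    const float texel = 1.0f / 64.0f;  // A 64-pixel source axis.
    TapF a = { 0.3f, 10.5f * texel };
    TapF b = { 0.1f, 11.5f * texel };
    TapF c = combine(a, b);
    // Weight 0.4 placed 1/4 texel past a's center: bilinear filtering mixes
    // the two texels 0.75/0.25, reproducing the original weights 0.3 and 0.1.
    std::printf("weight=%.2f pos=%.6f\n", c.weight, c.pos);
    return 0;
}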
 template<class DestFloat>
-unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, Tap<DestFloat> **bilinear_weights)
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
 {
     float num_subtexels = src_size / movit_texel_subpixel_precision;
     float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
-    int src_bilinear_samples = 0;
+    float pos1_pos2_diff = 1.0f / src_size;
+    float inv_pos1_pos2_diff = src_size;
 
-    for (unsigned y = 0; y < dst_samples; ++y) {
-        unsigned num_samples_saved = combine_samples(weights + y * src_samples, NULL, num_subtexels, inv_num_subtexels, src_samples, UINT_MAX);
-        src_bilinear_samples = max(src_bilinear_samples, src_samples - num_samples_saved);
+    unsigned max_samples_saved = UINT_MAX;
+    for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y) {
+        unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, nullptr, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved, pos1_pos2_diff, inv_pos1_pos2_diff);
+        max_samples_saved = min(max_samples_saved, num_samples_saved);
     }
 
     // Now that we know the right width, actually combine the samples.
-    *bilinear_weights = new Tap<DestFloat>[dst_samples * src_bilinear_samples];
+    unsigned src_bilinear_samples = src_samples - max_samples_saved;
+    bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
     for (unsigned y = 0; y < dst_samples; ++y) {
-        Tap<DestFloat> *bilinear_weights_ptr = *bilinear_weights + y * src_bilinear_samples;
+        Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
         unsigned num_samples_saved = combine_samples(
             weights + y * src_samples,
             bilinear_weights_ptr,
             num_subtexels,
             inv_num_subtexels,
             src_samples,
-            src_samples - src_bilinear_samples);
-        assert(int(src_samples) - int(num_samples_saved) == src_bilinear_samples);
+            max_samples_saved,
+            pos1_pos2_diff,
+            inv_pos1_pos2_diff);
+        assert(num_samples_saved == max_samples_saved);
         normalize_sum(bilinear_weights_ptr, src_bilinear_samples);
     }
     return src_bilinear_samples;
 }
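The calling pattern above, reduced to a runnable toy (taps_saved_for_row() is an invented stand-in for the nullptr dry-run call of combine_samples()): the weight texture is rectangular, so every row must end up with the same number of taps; the first pass only establishes how many taps all rows can spare, and the second pass writes rows padded to that common width.

#include <algorithm>
#include <climits>
#include <cstdio>
#include <vector>

// Invented stand-in for the dry run: how many taps row y could drop.
unsigned taps_saved_for_row(unsigned y) { return (y % 3 == 0) ? 1 : 2; }

int main()
{
    const unsigned dst_samples = 10, src_samples = 7;

    // Pass 1 (dst == nullptr in the real code): the least-compressible row
    // decides the shared width.
    unsigned max_samples_saved = UINT_MAX;
    for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y)
        max_samples_saved = std::min(max_samples_saved, taps_saved_for_row(y));

    // One allocation at the agreed-upon width; pass 2 would then fill each
    // row, padded out to exactly this many taps.
    const unsigned width = src_samples - max_samples_saved;
    std::vector<float> weights(dst_samples * width);
    std::printf("%u taps per row, %zu floats total\n", width, weights.size());
    return 0;
}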
@@ -199,10 +268,10 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
     // Find the effective range of the bilinear-optimized kernel.
     // Due to rounding of the positions, this is not necessarily the same
     // as the intended range (i.e., the range of the original weights).
-    int lower_pos = int(floor(to_fp64(bilinear_weights[0].pos) * size - 0.5));
-    int upper_pos = int(ceil(to_fp64(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5)) + 2;
-    lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5));
-    upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5) + 1);
+    int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5f));
+    int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5f)) + 2;
+    lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5f));
+    upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5f) + 1);
 
     float* effective_weights = new float[upper_pos - lower_pos];
     for (int i = 0; i < upper_pos - lower_pos; ++i) {
@@ -211,7 +280,7 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
 
     // Now find the effective weights that result from this sampling.
     for (unsigned i = 0; i < num_bilinear_weights; ++i) {
-        const float pixel_pos = to_fp64(bilinear_weights[i].pos) * size - 0.5f;
+        const float pixel_pos = to_fp32(bilinear_weights[i].pos) * size - 0.5f;
         const int x0 = int(floor(pixel_pos)) - lower_pos;
         const int x1 = x0 + 1;
         const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
@@ -221,8 +290,8 @@ double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
         assert(x0 < upper_pos - lower_pos);
         assert(x1 < upper_pos - lower_pos);
 
-        effective_weights[x0] += to_fp64(bilinear_weights[i].weight) * (1.0 - f);
-        effective_weights[x1] += to_fp64(bilinear_weights[i].weight) * f;
+        effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0f - f);
+        effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
     }
 
     // Subtract the desired weights to get the error.
@@ -255,22 +324,40 @@ ResampleEffect::ResampleEffect()
     register_int("width", &output_width);
     register_int("height", &output_height);
 
-    // The first blur pass will forward resolution information to us.
-    hpass = new SingleResamplePassEffect(this);
-    CHECK(hpass->set_int("direction", SingleResamplePassEffect::HORIZONTAL));
-    vpass = new SingleResamplePassEffect(NULL);
-    CHECK(vpass->set_int("direction", SingleResamplePassEffect::VERTICAL));
+    if (movit_compute_shaders_supported) {
+        // The effect will forward resolution information to us.
+        compute_effect_owner.reset(new ResampleComputeEffect(this));
+        compute_effect = compute_effect_owner.get();
+    } else {
+        // The first blur pass will forward resolution information to us.
+        hpass_owner.reset(new SingleResamplePassEffect(this));
+        hpass = hpass_owner.get();
+        CHECK(hpass->set_int("direction", SingleResamplePassEffect::HORIZONTAL));
+        vpass_owner.reset(new SingleResamplePassEffect(this));
+        vpass = vpass_owner.get();
+        CHECK(vpass->set_int("direction", SingleResamplePassEffect::VERTICAL));
+    }
 
     update_size();
 }
 
+ResampleEffect::~ResampleEffect()
+{
+}
+
 void ResampleEffect::rewrite_graph(EffectChain *graph, Node *self)
 {
-    Node *hpass_node = graph->add_node(hpass);
-    Node *vpass_node = graph->add_node(vpass);
-    graph->connect_nodes(hpass_node, vpass_node);
-    graph->replace_receiver(self, hpass_node);
-    graph->replace_sender(self, vpass_node);
+    if (compute_effect != nullptr) {
+        Node *compute_node = graph->add_node(compute_effect_owner.release());
+        graph->replace_receiver(self, compute_node);
+        graph->replace_sender(self, compute_node);
+    } else {
+        Node *hpass_node = graph->add_node(hpass_owner.release());
+        Node *vpass_node = graph->add_node(vpass_owner.release());
+        graph->connect_nodes(hpass_node, vpass_node);
+        graph->replace_receiver(self, hpass_node);
+        graph->replace_sender(self, vpass_node);
+    }
     self->disabled = true;
 }
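For context, all of this is driven through the parameters registered above. A typical caller, following the usual movit EffectChain pattern, might look like the abbreviated sketch below (the input setup is omitted and the sizes are arbitrary; whether the compute path or the hpass/vpass pair runs underneath is invisible to the caller):

EffectChain chain(16, 9);   // Aspect ratio, not pixels.
chain.add_input(input);     // Some Input *input, e.g. a FlatInput; setup not shown.

Effect *resample = chain.add_effect(new ResampleEffect());
CHECK(resample->set_int("width", 1280));     // Output size.
CHECK(resample->set_int("height", 720));
CHECK(resample->set_float("zoom_x", 2.0f));  // Optional zoom, as wired up below.
CHECK(resample->set_float("zoom_y", 2.0f));
// ... add_output() and finalize() as usual.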
@@ -289,16 +376,22 @@ void ResampleEffect::inform_input_size(unsigned input_num, unsigned width, unsig
 void ResampleEffect::update_size()
 {
     bool ok = true;
-    ok |= hpass->set_int("input_width", input_width);
-    ok |= hpass->set_int("input_height", input_height);
-    ok |= hpass->set_int("output_width", output_width);
-    ok |= hpass->set_int("output_height", input_height);
-
-    ok |= vpass->set_int("input_width", output_width);
-    ok |= vpass->set_int("input_height", input_height);
-    ok |= vpass->set_int("output_width", output_width);
-    ok |= vpass->set_int("output_height", output_height);
-
+    if (compute_effect != nullptr) {
+        ok |= compute_effect->set_int("input_width", input_width);
+        ok |= compute_effect->set_int("input_height", input_height);
+        ok |= compute_effect->set_int("output_width", output_width);
+        ok |= compute_effect->set_int("output_height", output_height);
+    } else {
+        ok |= hpass->set_int("input_width", input_width);
+        ok |= hpass->set_int("input_height", input_height);
+        ok |= hpass->set_int("output_width", output_width);
+        ok |= hpass->set_int("output_height", input_height);
+
+        ok |= vpass->set_int("input_width", output_width);
+        ok |= vpass->set_int("input_height", input_height);
+        ok |= vpass->set_int("output_width", output_width);
+        ok |= vpass->set_int("output_height", output_height);
+    }
     assert(ok);
 
     // The offset added due to zoom may have changed with the size.
@@ -314,10 +407,17 @@ void ResampleEffect::update_offset_and_zoom()
     float extra_offset_x = zoom_center_x * (1.0f - 1.0f / zoom_x) * input_width;
     float extra_offset_y = (1.0f - zoom_center_y) * (1.0f - 1.0f / zoom_y) * input_height;
 
-    ok |= hpass->set_float("offset", extra_offset_x + offset_x);
-    ok |= vpass->set_float("offset", extra_offset_y - offset_y);  // Compensate for the bottom-left origin.
-    ok |= hpass->set_float("zoom", zoom_x);
-    ok |= vpass->set_float("zoom", zoom_y);
+    if (compute_effect != nullptr) {
+        ok |= compute_effect->set_float("offset_x", extra_offset_x + offset_x);
+        ok |= compute_effect->set_float("offset_y", extra_offset_y - offset_y);  // Compensate for the bottom-left origin.
+        ok |= compute_effect->set_float("zoom_x", zoom_x);
+        ok |= compute_effect->set_float("zoom_y", zoom_y);
+    } else {
+        ok |= hpass->set_float("offset", extra_offset_x + offset_x);
+        ok |= vpass->set_float("offset", extra_offset_y - offset_y);  // Compensate for the bottom-left origin.
+        ok |= hpass->set_float("zoom", zoom_x);
+        ok |= vpass->set_float("zoom", zoom_y);
+    }
 
     assert(ok);
 }
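Worked numbers for the zoom compensation above: with zoom_center_x = 0.5, zoom_x = 2.0 and input_width = 1280, extra_offset_x = 0.5 * (1 - 1/2.0) * 1280 = 320 source pixels, i.e. a 2x zoom about the center shifts the sampled window by a quarter of the frame so that the zoom center stays put. At zoom_x = 1.0 the term vanishes and only the user-supplied offset remains.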
@@ -375,8 +475,8 @@ bool ResampleEffect::set_float(const string &key, float value) {
 SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
     : parent(parent),
       direction(HORIZONTAL),
-      input_width(1280),
-      input_height(720),
+      input_width(1280),
+      input_height(720),
       offset(0.0),
       zoom(1.0),
       last_input_width(-1),
@@ -384,8 +484,7 @@ SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
       last_output_width(-1),
       last_output_height(-1),
       last_offset(0.0 / 0.0),  // NaN.
-      last_zoom(0.0 / 0.0),  // NaN.
-      last_texture_width(-1), last_texture_height(-1)
+      last_zoom(0.0 / 0.0)  // NaN.
 {
     register_int("direction", (int *)&direction);
     register_int("input_width", &input_width);
@@ -395,19 +494,18 @@ SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
     register_float("offset", &offset);
     register_float("zoom", &zoom);
     register_uniform_sampler2d("sample_tex", &uniform_sample_tex);
-    register_uniform_int("num_samples", &uniform_num_samples);  // FIXME: What about GLSL pre-1.30?
+    register_uniform_int("num_samples", &uniform_num_samples);
     register_uniform_float("num_loops", &uniform_num_loops);
     register_uniform_float("slice_height", &uniform_slice_height);
     register_uniform_float("sample_x_scale", &uniform_sample_x_scale);
     register_uniform_float("sample_x_offset", &uniform_sample_x_offset);
     register_uniform_float("whole_pixel_offset", &uniform_whole_pixel_offset);
 
-    glGenTextures(1, &texnum);
+    call_once(lanczos_table_init_done, init_lanczos_table);
 }
 
 SingleResamplePassEffect::~SingleResamplePassEffect()
 {
-    glDeleteTextures(1, &texnum);
 }
 
 string SingleResamplePassEffect::output_fragment_shader()
@@ -444,12 +542,147 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
         assert(false);
     }
 
+    ScalingWeights weights = calculate_bilinear_scaling_weights(src_size, dst_size, zoom, offset, BilinearFormatConstraints::ALLOW_FP16_AND_FP32);
+    src_bilinear_samples = weights.src_bilinear_samples;
+    num_loops = weights.num_loops;
+    slice_height = 1.0f / weights.num_loops;
+
+    // Encode as a two-component texture. Note the GL_REPEAT.
+    glActiveTexture(GL_TEXTURE0 + *sampler_num);
+    check_error();
+    glBindTexture(GL_TEXTURE_2D, tex.get_texnum());
+    check_error();
+
+    GLenum type, internal_format;
+    void *pixels;
+    assert((weights.bilinear_weights_fp16 == nullptr) != (weights.bilinear_weights_fp32 == nullptr));
+    if (weights.bilinear_weights_fp32 != nullptr) {
+        type = GL_FLOAT;
+        internal_format = GL_RG32F;
+        pixels = weights.bilinear_weights_fp32.get();
+    } else {
+        type = GL_HALF_FLOAT;
+        internal_format = GL_RG16F;
+        pixels = weights.bilinear_weights_fp16.get();
+    }
+
+    tex.update(weights.src_bilinear_samples, weights.dst_samples, internal_format, GL_RG, type, pixels);
+}
+
+ResampleComputeEffect::ResampleComputeEffect(ResampleEffect *parent)
+    : parent(parent),
+      input_width(1280),
+      input_height(720),
+      offset_x(0.0),
+      offset_y(0.0),
+      zoom_x(1.0),
+      zoom_y(1.0),
+      last_input_width(-1),
+      last_input_height(-1),
+      last_output_width(-1),
+      last_output_height(-1),
+      last_offset_x(0.0 / 0.0),  // NaN.
+      last_offset_y(0.0 / 0.0),  // NaN.
+      last_zoom_x(0.0 / 0.0),  // NaN.
+      last_zoom_y(0.0 / 0.0)  // NaN.
+{
+    register_int("input_width", &input_width);
+    register_int("input_height", &input_height);
+    register_int("output_width", &output_width);
+    register_int("output_height", &output_height);
+    register_float("offset_x", &offset_x);
+    register_float("offset_y", &offset_y);
+    register_float("zoom_x", &zoom_x);
+    register_float("zoom_y", &zoom_y);
+    register_uniform_sampler2d("sample_tex_horizontal", &uniform_sample_tex_horizontal);
+    register_uniform_sampler2d("sample_tex_vertical", &uniform_sample_tex_vertical);
+    register_uniform_int("num_horizontal_samples", &uniform_num_horizontal_samples);
+    register_uniform_int("num_vertical_samples", &uniform_num_vertical_samples);
+    register_uniform_int("vertical_int_radius", &uniform_vertical_int_radius);
+    register_uniform_float("inv_vertical_scaling_factor", &uniform_inv_vertical_scaling_factor);
+    register_uniform_int("output_samples_per_block", &uniform_output_samples_per_block);
+    register_uniform_int("num_horizontal_filters", &uniform_num_horizontal_filters);
+    register_uniform_int("num_vertical_filters", &uniform_num_vertical_filters);
+    register_uniform_float("slice_height", &uniform_slice_height);
+    register_uniform_float("horizontal_whole_pixel_offset", &uniform_horizontal_whole_pixel_offset);
+    register_uniform_int("vertical_whole_pixel_offset", &uniform_vertical_whole_pixel_offset);
+    register_uniform_float("inv_input_height", &uniform_inv_input_height);
+    register_uniform_float("input_texcoord_y_adjust", &uniform_input_texcoord_y_adjust);
+
+    call_once(lanczos_table_init_done, init_lanczos_table);
+}
+
+ResampleComputeEffect::~ResampleComputeEffect()
+{
+}
+
+string ResampleComputeEffect::output_fragment_shader()
+{
+    char buf[256] = "";
+    return buf + read_file("resample_effect.comp");
+}
+
+// The compute shader does horizontal scaling first, using exactly the same
+// two-component texture format as in the two-pass version (see the comments
+// on SingleResamplePassEffect). The vertical scaling calculates the offset
+// values in the shader, so we only store a one-component texture with the
+// weights for each filter.
+void ResampleComputeEffect::update_texture(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
+{
+    ScalingWeights horiz_weights = calculate_bilinear_scaling_weights(input_width, output_width, zoom_x, offset_x, BilinearFormatConstraints::ALLOW_FP32_ONLY);
+    ScalingWeights vert_weights = calculate_raw_scaling_weights(input_height, output_height, zoom_y, offset_y);
+    uniform_vertical_int_radius = vert_weights.int_radius;
+    vertical_scaling_factor = vert_weights.scaling_factor;
+    uniform_inv_vertical_scaling_factor = 1.0f / vert_weights.scaling_factor;
+    src_horizontal_bilinear_samples = horiz_weights.src_bilinear_samples;
+    src_vertical_samples = vert_weights.src_bilinear_samples;
+    uniform_num_horizontal_filters = horiz_weights.dst_samples;
+    uniform_num_vertical_filters = vert_weights.dst_samples;
+    slice_height = 1.0f / horiz_weights.num_loops;
+
+    // Encode as a two-component texture. Note the GL_REPEAT.
+    glActiveTexture(GL_TEXTURE0 + *sampler_num);
+    check_error();
+    glBindTexture(GL_TEXTURE_2D, tex_horiz.get_texnum());
+    check_error();
+
+    tex_horiz.update(horiz_weights.src_bilinear_samples, horiz_weights.dst_samples, GL_RG32F, GL_RG, GL_FLOAT, horiz_weights.bilinear_weights_fp32.get());
+
+    glActiveTexture(GL_TEXTURE0 + *sampler_num + 1);
+    check_error();
+    glBindTexture(GL_TEXTURE_2D, tex_vert.get_texnum());
+    check_error();
+
+    // Storing the vertical weights as fp16 instead of fp32 saves a few
+    // percent on NVIDIA, and it doesn't seem to hurt quality any.
+    // (The horizontal weights are a different story, since the offsets
+    // can get large and are fairly accuracy-sensitive. Also, they are
+    // loaded only once per workgroup, at the very beginning.)
+    tex_vert.update(vert_weights.src_bilinear_samples, vert_weights.dst_samples, GL_R16F, GL_RED, GL_HALF_FLOAT, vert_weights.raw_weights.get());
+
+    // Figure out how many output samples each compute shader block is going to output.
+    int usable_input_samples_per_block = 128 - 2 * uniform_vertical_int_radius;
+    int output_samples_per_block = int(floor(usable_input_samples_per_block * vertical_scaling_factor));
+    if (output_samples_per_block < 1) {
+        output_samples_per_block = 1;
+    }
+    uniform_output_samples_per_block = output_samples_per_block;
+}
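Worked numbers for the block math above (the 128 is presumably the number of input rows a workgroup loads in y): downscaling the height by 2x gives vertical_scaling_factor = 0.5 and int_radius = lrintf(3.0 / 0.5) = 6, so a block can fully filter only 128 - 2*6 = 116 of its input samples, yielding floor(116 * 0.5) = 58 output samples per block. get_compute_dimensions() further down then launches ceil(output_height / 58) workgroups in y.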
+
+namespace {
+
+ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
+{
+    // Only needed if run from outside ResampleEffect.
+    call_once(lanczos_table_init_done, init_lanczos_table);
+
     // For many resamplings (e.g. 640 -> 1280), we will end up with the same
     // set of samples over and over again in a loop. Thus, we can compute only
     // the first such loop, and then ask the card to repeat the texture for us.
     // This is both easier on the texture cache and lowers our CPU cost for
     // generating the kernel somewhat.
     float scaling_factor;
+    int num_loops;
     if (fabs(zoom - 1.0f) < 1e-6) {
         num_loops = gcd(src_size, dst_size);
         scaling_factor = float(dst_size) / float(src_size);
@@ -462,7 +695,6 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
         num_loops = 1;
         scaling_factor = zoom * float(dst_size) / float(src_size);
     }
-    slice_height = 1.0f / num_loops;
     unsigned dst_samples = dst_size / num_loops;
 
     // Sample the kernel in the right place. A diagram with a triangular kernel
@@ -515,96 +747,105 @@ void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const str
     // to compute the destination pixel, and how many depend on the scaling factor.
     // Thus, the kernel width will vary with how much we scale.
     float radius_scaling_factor = min(scaling_factor, 1.0f);
-    int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
-    int src_samples = int_radius * 2 + 1;
-    Tap<float> *weights = new Tap<float>[dst_samples * src_samples];
+    const int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
+    const int src_samples = int_radius * 2 + 1;
+    unique_ptr<Tap<float>[]> weights(new Tap<float>[dst_samples * src_samples]);
     float subpixel_offset = offset - lrintf(offset);  // The part not covered by whole_pixel_offset.
     assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
+    float inv_scaling_factor = 1.0f / scaling_factor;
     for (unsigned y = 0; y < dst_samples; ++y) {
         // Find the point around which we want to sample the source image,
         // compensating for differing pixel centers as the scale changes.
-        float center_src_y = (y + 0.5f) / scaling_factor - 0.5f;
+        float center_src_y = (y + 0.5f) * inv_scaling_factor - 0.5f;
         int base_src_y = lrintf(center_src_y);
 
         // Now sample pixels on each side around that point.
+        float inv_src_size = 1.0 / float(src_size);
         for (int i = 0; i < src_samples; ++i) {
             int src_y = base_src_y + i - int_radius;
-            float weight = lanczos_weight(radius_scaling_factor * (src_y - center_src_y - subpixel_offset), LANCZOS_RADIUS);
+            float weight = lanczos_weight_cached(radius_scaling_factor * (src_y - center_src_y - subpixel_offset));
             weights[y * src_samples + i].weight = weight * radius_scaling_factor;
-            weights[y * src_samples + i].pos = (src_y + 0.5) / float(src_size);
+            weights[y * src_samples + i].pos = (src_y + 0.5f) * inv_src_size;
         }
     }
 
+    ScalingWeights ret;
+    ret.src_bilinear_samples = src_samples;
+    ret.dst_samples = dst_samples;
+    ret.int_radius = int_radius;
+    ret.scaling_factor = scaling_factor;
+    ret.num_loops = num_loops;
+    ret.bilinear_weights_fp16 = nullptr;
+    ret.bilinear_weights_fp32 = move(weights);
+    ret.raw_weights = nullptr;
+    return ret;
+}
+
+}  // namespace
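Worked numbers for the two tricks above: upscaling 640 -> 1280 at zoom 1 gives num_loops = gcd(640, 1280) = 640, so only dst_samples = 1280 / 640 = 2 distinct filter phases are computed, and GL_REPEAT replays them across the whole axis. With scaling_factor = 2, the first output sample (y = 0) gets center_src_y = (0 + 0.5) / 2 - 0.5 = -0.25, i.e. its filter is centered a quarter of a source texel before the first source pixel center, which is exactly the pixel-center compensation the comment describes.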
+
+ScalingWeights calculate_bilinear_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset, BilinearFormatConstraints constraints)
+{
+    ScalingWeights ret = calculate_scaling_weights(src_size, dst_size, zoom, offset);
+    unique_ptr<Tap<float>[]> weights = move(ret.bilinear_weights_fp32);
+    const int src_samples = ret.src_bilinear_samples;
+
     // Now make use of the bilinear filtering in the GPU to reduce the number of samples
     // we need to make. Try fp16 first; if it's not accurate enough, we go to fp32.
     // Our tolerance level for total error is a bit higher than the one for individual
     // samples, since one would assume overall errors in the shape don't matter as much.
     const float max_error = 2.0f / (255.0f * 255.0f);
-    Tap<fp16_int_t> *bilinear_weights_fp16;
-    src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp16);
-    Tap<float> *bilinear_weights_fp32 = NULL;
-    bool fallback_to_fp32 = false;
+    unique_ptr<Tap<fp16_int_t>[]> bilinear_weights_fp16;
+    unique_ptr<Tap<float>[]> bilinear_weights_fp32;
     double max_sum_sq_error_fp16 = 0.0;
-    for (unsigned y = 0; y < dst_samples; ++y) {
-        double sum_sq_error_fp16 = compute_sum_sq_error(
-            weights + y * src_samples, src_samples,
-            bilinear_weights_fp16 + y * src_bilinear_samples, src_bilinear_samples,
-            src_size);
-        max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
-        if (max_sum_sq_error_fp16 > max_error) {
-            break;
+    int src_bilinear_samples;
+    if (constraints == BilinearFormatConstraints::ALLOW_FP32_ONLY) {
+        max_sum_sq_error_fp16 = numeric_limits<double>::max();
+    } else {
+        assert(constraints == BilinearFormatConstraints::ALLOW_FP16_AND_FP32);
+        src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, ret.dst_samples, &bilinear_weights_fp16);
+        for (unsigned y = 0; y < ret.dst_samples; ++y) {
+            double sum_sq_error_fp16 = compute_sum_sq_error(
+                weights.get() + y * src_samples, src_samples,
+                bilinear_weights_fp16.get() + y * src_bilinear_samples, src_bilinear_samples,
+                src_size);
+            max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
+            if (max_sum_sq_error_fp16 > max_error) {
+                break;
+            }
         }
     }
 
     if (max_sum_sq_error_fp16 > max_error) {
-        fallback_to_fp32 = true;
-        src_bilinear_samples = combine_many_samples(weights, src_size, src_samples, dst_samples, &bilinear_weights_fp32);
+        bilinear_weights_fp16.reset();
+        src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, ret.dst_samples, &bilinear_weights_fp32);
     }
 
-    // Encode as a two-component texture. Note the GL_REPEAT.
-    glActiveTexture(GL_TEXTURE0 + *sampler_num);
-    check_error();
-    glBindTexture(GL_TEXTURE_2D, texnum);
-    check_error();
-    if (last_texture_width == -1) {
-        // Need to set this state the first time.
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
-        check_error();
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
-        check_error();
-        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
-        check_error();
-    }
-
-    GLenum type, internal_format;
-    void *pixels;
-    if (fallback_to_fp32) {
-        type = GL_FLOAT;
-        internal_format = GL_RG32F;
-        pixels = bilinear_weights_fp32;
-    } else {
-        type = GL_HALF_FLOAT;
-        internal_format = GL_RG16F;
-        pixels = bilinear_weights_fp16;
-    }
+    ret.src_bilinear_samples = src_bilinear_samples;
+    ret.bilinear_weights_fp16 = move(bilinear_weights_fp16);
+    ret.bilinear_weights_fp32 = move(bilinear_weights_fp32);
+    return ret;
+}
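For scale: the fp16 acceptance threshold above, max_error = 2.0 / (255 * 255), is about 3.1e-5 of summed squared error per kernel, i.e. an RMS deviation of sqrt(2)/255. That is the same "about sqrt(2) of a level at 8-bit precision" tolerance used per-sample in combine_samples(), here applied to the whole reconstructed kernel.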
 
-    if (int(src_bilinear_samples) == last_texture_width &&
-        int(dst_samples) == last_texture_height &&
-        internal_format == last_texture_internal_format) {
-        // Texture dimensions and type are unchanged; it is more efficient
-        // to just update it rather than making an entirely new texture.
-        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, src_bilinear_samples, dst_samples, GL_RG, type, pixels);
-    } else {
-        glTexImage2D(GL_TEXTURE_2D, 0, internal_format, src_bilinear_samples, dst_samples, 0, GL_RG, type, pixels);
-        last_texture_width = src_bilinear_samples;
-        last_texture_height = dst_samples;
-        last_texture_internal_format = internal_format;
+// Unlike calculate_bilinear_scaling_weights(), this just converts the weights,
+// without any combining trickery. Thus, it is also much faster.
+ScalingWeights calculate_raw_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
+{
+    ScalingWeights ret = calculate_scaling_weights(src_size, dst_size, zoom, offset);
+    unique_ptr<Tap<float>[]> weights = move(ret.bilinear_weights_fp32);
+    const int src_samples = ret.src_bilinear_samples;
+
+    // Convert to fp16 (without any positions, as they are calculated implicitly
+    // by the compute shader) and normalize.
+    unique_ptr<fp16_int_t[]> raw_weights(new fp16_int_t[ret.dst_samples * src_samples]);
+    for (unsigned y = 0; y < ret.dst_samples; ++y) {
+        for (int i = 0; i < src_samples; ++i) {
+            raw_weights[y * src_samples + i] = fp32_to_fp16(weights[y * src_samples + i].weight);
+        }
+        normalize_sum(raw_weights.get() + y * src_samples, src_samples);
     }
-    check_error();
-
-    delete[] weights;
-    delete[] bilinear_weights_fp16;
-    delete[] bilinear_weights_fp32;
+    ret.raw_weights = move(raw_weights);
+    return ret;
 }
 
 void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
@@ -633,7 +874,7 @@ void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const strin
 
     glActiveTexture(GL_TEXTURE0 + *sampler_num);
     check_error();
-    glBindTexture(GL_TEXTURE_2D, texnum);
+    glBindTexture(GL_TEXTURE_2D, tex.get_texnum());
     check_error();
 
     uniform_sample_tex = *sampler_num;
@@ -651,14 +892,107 @@ void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const strin
     } else {
         uniform_whole_pixel_offset = lrintf(offset) / float(input_width);
     }
+}
+
+Support2DTexture::Support2DTexture()
+{
+    glGenTextures(1, &texnum);
+    check_error();
+    glBindTexture(GL_TEXTURE_2D, texnum);
+    check_error();
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+    check_error();
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+    check_error();
+    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
+    check_error();
+}
+
+Support2DTexture::~Support2DTexture()
+{
+    glDeleteTextures(1, &texnum);
+    check_error();
+}
+
+void Support2DTexture::update(GLint width, GLint height, GLenum internal_format, GLenum format, GLenum type, const GLvoid *data)
+{
+    glBindTexture(GL_TEXTURE_2D, texnum);
+    check_error();
+    if (width == last_texture_width &&
+        height == last_texture_height &&
+        internal_format == last_texture_internal_format) {
+        // Texture dimensions and type are unchanged; it is more efficient
+        // to just update it rather than making an entirely new texture.
+        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, data);
+        check_error();
+    } else {
+        glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, type, data);
+        check_error();
+        last_texture_width = width;
+        last_texture_height = height;
+        last_texture_internal_format = internal_format;
+    }
+}
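The intended calling pattern for the new helper, as exercised by the update_texture() implementations above (fragment only; width, height and the pixel pointers stand for whatever the caller has):

Support2DTexture tex;  // glGenTextures plus NEAREST/REPEAT setup happens here.

// The first call with a given size/format allocates via glTexImage2D ...
tex.update(width, height, GL_RG32F, GL_RG, GL_FLOAT, pixels);
// ... and later same-shape calls take the cheaper glTexSubImage2D path.
tex.update(width, height, GL_RG32F, GL_RG, GL_FLOAT, updated_pixels);

glBindTexture(GL_TEXTURE_2D, tex.get_texnum());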
+
+void ResampleComputeEffect::get_compute_dimensions(unsigned output_width, unsigned output_height,
+                                                   unsigned *x, unsigned *y, unsigned *z) const
+{
+    *x = output_width;
+    *y = (output_height + uniform_output_samples_per_block - 1) / uniform_output_samples_per_block;
+    *z = 1;
+}
+
+void ResampleComputeEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
+{
+    Effect::set_gl_state(glsl_program_num, prefix, sampler_num);
+
+    assert(input_width > 0);
+    assert(input_height > 0);
+    assert(output_width > 0);
+    assert(output_height > 0);
+
+    if (input_width != last_input_width ||
+        input_height != last_input_height ||
+        output_width != last_output_width ||
+        output_height != last_output_height ||
+        offset_x != last_offset_x ||
+        offset_y != last_offset_y ||
+        zoom_x != last_zoom_x ||
+        zoom_y != last_zoom_y) {
+        update_texture(glsl_program_num, prefix, sampler_num);
+        last_input_width = input_width;
+        last_input_height = input_height;
+        last_output_width = output_width;
+        last_output_height = output_height;
+        last_offset_x = offset_x;
+        last_offset_y = offset_y;
+        last_zoom_x = zoom_x;
+        last_zoom_y = zoom_y;
+    }
+
+    glActiveTexture(GL_TEXTURE0 + *sampler_num);
+    check_error();
+    glBindTexture(GL_TEXTURE_2D, tex_horiz.get_texnum());
+    check_error();
+    uniform_sample_tex_horizontal = *sampler_num;
+    ++*sampler_num;
 
-    // We specifically do not want mipmaps on the input texture;
-    // they break minification.
-    Node *self = chain->find_node_for_effect(this);
-    glActiveTexture(chain->get_input_sampler(self, 0));
+    glActiveTexture(GL_TEXTURE0 + *sampler_num);
     check_error();
-    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+    glBindTexture(GL_TEXTURE_2D, tex_vert.get_texnum());
     check_error();
+    uniform_sample_tex_vertical = *sampler_num;
+    ++*sampler_num;
+
+    uniform_num_horizontal_samples = src_horizontal_bilinear_samples;
+    uniform_num_vertical_samples = src_vertical_samples;
+    uniform_slice_height = slice_height;
+
+    uniform_horizontal_whole_pixel_offset = lrintf(offset_x) / float(input_width);
+    uniform_vertical_whole_pixel_offset = lrintf(offset_y);
+
+    uniform_inv_input_height = 1.0f / float(input_height);
+    uniform_input_texcoord_y_adjust = 0.5f / float(input_height);
 }
 
 }  // namespace movit
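One detail worth making explicit: the last_* members are initialized to 0.0 / 0.0 (NaN) because NaN compares unequal to everything, itself included, so the dirty-check in set_gl_state() is guaranteed to fire on the first call. The idiom in isolation:

#include <cassert>
#include <cmath>

int main()
{
    float last_offset = 0.0f / 0.0f;  // NaN sentinel: "no previous value".
    assert(std::isnan(last_offset));
    // NaN != anything (itself included), so a check like
    // "offset != last_offset" is always true on the first run.
    float offset = 0.0f;
    assert(offset != last_offset);
    return 0;
}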