// Constructor: sets up a single FFT pass over a default 1280x720 input,
// horizontal direction. The last_* members cache the parameters that the
// support texture was most recently generated for; they start out with
// impossible values (-1 / INVALID) so that the first set_gl_state() call
// is guaranteed to (re)generate the support texture.
FFTPassEffect::FFTPassEffect()
	: input_width(1280),
	  input_height(720),
	  direction(HORIZONTAL),
	  last_fft_size(-1),
	  last_direction(INVALID),
	  last_pass_number(-1),
	  last_inverse(-1),
	  last_input_size(-1)
{
	register_int("fft_size", &fft_size);
	register_int("direction", (int *)&direction);
	register_int("pass_number", &pass_number);
	register_int("inverse", &inverse);
	register_uniform_float("num_repeats", &uniform_num_repeats);
	register_uniform_sampler2d("support_tex", &uniform_support_tex);
	glGenTextures(1, &tex);
}
{
Effect::set_gl_state(glsl_program_num, prefix, sampler_num);
- int input_size = (direction == VERTICAL) ? input_height : input_width;
-
// This is needed because it counteracts the precision issues we get
// because we sample the input texture with normalized coordinates
// (especially when the repeat count along the axis is not a power of
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
check_error();
+ // Because of the memory layout (see below) and because we use offsets,
+ // the support texture values for many consecutive values will be
+ // the same. Thus, we can store a smaller texture (giving a small
+ // performance boost) and just sample it with NEAREST. Also, this
+ // counteracts any precision issues we might get from linear
+ // interpolation.
+ glActiveTexture(GL_TEXTURE0 + *sampler_num);
+ check_error();
+ glBindTexture(GL_TEXTURE_2D, tex);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ check_error();
+ glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
+ check_error();
+
+ int input_size = (direction == VERTICAL) ? input_height : input_width;
+ if (last_fft_size != fft_size ||
+ last_direction != direction ||
+ last_pass_number != pass_number ||
+ last_inverse != inverse ||
+ last_input_size != input_size) {
+ generate_support_texture();
+ }
+
+ uniform_support_tex = *sampler_num;
+ ++*sampler_num;
+
+ assert(input_size % fft_size == 0);
+ uniform_num_repeats = input_size / fft_size;
+}
+
+void FFTPassEffect::generate_support_texture()
+{
+ int input_size = (direction == VERTICAL) ? input_height : input_width;
+
// The memory layout follows figure 5.2 on page 25 of
// http://gpuwave.sesse.net/gpuwave.pdf -- it can be a bit confusing
// at first, but is classically explained more or less as follows:
// bit, so the stride is 8, and so on.
assert((fft_size & (fft_size - 1)) == 0); // Must be power of two.
- fp16_int_t *tmp = new fp16_int_t[fft_size * 4];
int subfft_size = 1 << pass_number;
+ fp16_int_t *tmp = new fp16_int_t[subfft_size * 4];
double mulfac;
if (inverse) {
mulfac = 2.0 * M_PI;
assert((fft_size & (fft_size - 1)) == 0); // Must be power of two.
assert(fft_size % subfft_size == 0);
int stride = fft_size / subfft_size;
- for (int i = 0; i < fft_size; ++i) {
- int k = i / stride; // Element number within this sub-FFT.
- int offset = i % stride; // Sub-FFT number.
+ for (int i = 0; i < subfft_size; i++) {
+ int k = i;
double twiddle_real, twiddle_imag;
if (k < subfft_size / 2) {
// for using offsets and not direct coordinates as in GPUwave
// is that we can have multiple FFTs along the same line,
// and want to reuse the support texture by repeating it.
- int base = k * stride * 2 + offset;
+ int base = k * stride * 2;
int support_texture_index = i;
int src1 = base;
int src2 = base + stride;
+ double sign = 1.0;
if (direction == FFTPassEffect::VERTICAL) {
// Compensate for OpenGL's bottom-left convention.
- support_texture_index = fft_size - support_texture_index - 1;
- src1 = fft_size - src1 - 1;
- src2 = fft_size - src2 - 1;
+ support_texture_index = subfft_size - support_texture_index - 1;
+ sign = -1.0;
}
- tmp[support_texture_index * 4 + 0] = fp64_to_fp16((src1 - support_texture_index) / double(input_size));
- tmp[support_texture_index * 4 + 1] = fp64_to_fp16((src2 - support_texture_index) / double(input_size));
- tmp[support_texture_index * 4 + 2] = fp64_to_fp16(twiddle_real);
- tmp[support_texture_index * 4 + 3] = fp64_to_fp16(twiddle_imag);
+ tmp[support_texture_index * 4 + 0] = fp32_to_fp16(sign * (src1 - i * stride) / double(input_size));
+ tmp[support_texture_index * 4 + 1] = fp32_to_fp16(sign * (src2 - i * stride) / double(input_size));
+ tmp[support_texture_index * 4 + 2] = fp32_to_fp16(twiddle_real);
+ tmp[support_texture_index * 4 + 3] = fp32_to_fp16(twiddle_imag);
}
- glActiveTexture(GL_TEXTURE0 + *sampler_num);
- check_error();
- glBindTexture(GL_TEXTURE_2D, tex);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
- check_error();
- glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
- check_error();
-
// Supposedly FFTs are very sensitive to inaccuracies in the twiddle factors,
// at least according to a paper by Schatzman (see gpuwave.pdf reference [30]
// for the full reference); however, practical testing indicates that it's
// which gives a nice speed boost.
//
// Note that the source coordinates become somewhat less accurate too, though.
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, fft_size, 1, 0, GL_RGBA, GL_HALF_FLOAT, tmp);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, subfft_size, 1, 0, GL_RGBA, GL_HALF_FLOAT, tmp);
check_error();
delete[] tmp;
- set_uniform_int(glsl_program_num, prefix, "support_tex", *sampler_num);
- ++*sampler_num;
-
- assert(input_size % fft_size == 0);
- set_uniform_float(glsl_program_num, prefix, "num_repeats", input_size / fft_size);
+ last_fft_size = fft_size;
+ last_direction = direction;
+ last_pass_number = pass_number;
+ last_inverse = inverse;
+ last_input_size = input_size;
}
} // namespace movit