+ return sinc(M_PI * x) * sinc((M_PI / LANCZOS_RADIUS) * x);
+ }
+}
+
+// The weight function can be expensive to compute over and over again
+// (which will happen during e.g. a zoom), but it is also easy to interpolate
+// linearly. We compute the right half of the function (in the range of
+// 0..LANCZOS_RADIUS), with two guard elements for easier interpolation, and
+// linearly interpolate to get our function.
+//
+// We want to scale the table so that the maximum error is always smaller
+// than 1e-6. As per http://www-solar.mcs.st-andrews.ac.uk/~clare/Lectures/num-analysis/Numan_chap3.pdf,
+// the error for interpolating a function linearly between points [a,b] is
+//
+// e = 1/2 (x-a)(x-b) f''(u_x)
+//
+// for some point u_x in [a,b] (where f(x) is our Lanczos function; we're
+// assuming LANCZOS_RADIUS=3 from here on). Obviously this is bounded by
+// f''(x) over the entire range. Numeric optimization shows the maximum of
+// |f''(x)| to be in x=1.09369819474562880, with the value 2.40067758733152381.
+// So if the steps between consecutive values are called d, we get
+//
+// |e| <= 1/2 (d/2)^2 2.4007
+// |e| <= 0.1367 d^2
+//
+// Solve for e = 1e-6 yields a step size of 0.0027, which to cover the range
+// 0..3 needs 1109 steps. We round up to the next power of two, just to be sure.
+//
// You need to make sure init_lanczos_table() has run (e.g. through
// call_once() on lanczos_table_init_done) before the first call to
// lanczos_weight_cached.
#define LANCZOS_TABLE_SIZE 2048
// Guards the one-time initialization of lanczos_table (see init_lanczos_table()).
static once_flag lanczos_table_init_done;
// Right half of the Lanczos kernel, sampled uniformly over [0, LANCZOS_RADIUS],
// plus two guard entries past the end for easier linear interpolation.
float lanczos_table[LANCZOS_TABLE_SIZE + 2];
+
+void init_lanczos_table()
+{
+ for (unsigned i = 0; i < LANCZOS_TABLE_SIZE + 2; ++i) {
+ lanczos_table[i] = lanczos_weight(float(i) * (LANCZOS_RADIUS / LANCZOS_TABLE_SIZE));
+ }
+}
+
+float lanczos_weight_cached(float x)
+{
+ x = fabs(x);
+ if (x > LANCZOS_RADIUS) {
+ return 0.0f;
+ }
+ float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
+ unsigned table_pos_int = int(table_pos); // Truncate towards zero.
+ float table_pos_frac = table_pos - table_pos_int;
+ assert(table_pos < LANCZOS_TABLE_SIZE + 2);
+ return lanczos_table[table_pos_int] +
+ table_pos_frac * (lanczos_table[table_pos_int + 1] - lanczos_table[table_pos_int]);
+}
+
// Greatest common divisor (Euclid's algorithm). gcd(a, 0) == a by convention.
unsigned gcd(unsigned a, unsigned b)
{
	for (;;) {
		if (b == 0) {
			return a;
		}
		const unsigned remainder = a % b;
		a = b;
		b = remainder;
	}
}
+
// Combine pairs of neighboring filter taps into single bilinear lookups where
// possible (see the comment block above combine_many_samples for the overall
// strategy). Taps with near-zero weight at either end are trimmed outright;
// each trim or merge counts as one "saved" sample. If <dst> is nullptr, nothing
// is written; the call then only counts how many samples could be saved (used
// as a sizing prepass by combine_many_samples). Returns the number of samples
// saved, never more than <max_samples_saved>.
//
// <num_subtexels>/<inv_num_subtexels> and <pos1_pos2_diff>/<inv_pos1_pos2_diff>
// are precomputed values and their reciprocals, passed in so this inner loop
// avoids divisions (see combine_many_samples for how they are derived).
// combine_two_samples() and convert_float() are project helpers defined
// elsewhere; their exact semantics are not visible here.
template<class DestFloat>
unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved, float pos1_pos2_diff, float inv_pos1_pos2_diff)
{
	// Cut off near-zero values at both sides.
	unsigned num_samples_saved = 0;
	while (num_samples_saved < max_samples_saved &&
	       num_src_samples > 0 &&
	       fabs(src[0].weight) < 1e-6) {
		++src;
		--num_src_samples;
		++num_samples_saved;
	}
	while (num_samples_saved < max_samples_saved &&
	       num_src_samples > 0 &&
	       fabs(src[num_src_samples - 1].weight) < 1e-6) {
		--num_src_samples;
		++num_samples_saved;
	}

	// i walks the source taps; j walks the (possibly shorter) output.
	for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
		// Copy the sample directly; it will be overwritten later if we can combine.
		if (dst != nullptr) {
			dst[j].weight = convert_float<float, DestFloat>(src[i].weight);
			dst[j].pos = convert_float<float, DestFloat>(src[i].pos);
		}

		if (i == num_src_samples - 1) {
			// Last sample; cannot combine.
			continue;
		}
		assert(num_samples_saved <= max_samples_saved);
		if (num_samples_saved == max_samples_saved) {
			// We could maybe save more here, but other rows can't, so don't bother.
			continue;
		}

		float w1 = src[i].weight;
		float w2 = src[i + 1].weight;
		if (w1 * w2 < 0.0f) {
			// Differing signs; cannot combine.
			// (A bilinear lookup can only realize same-signed weight pairs.)
			continue;
		}

		// Taps are assumed sorted by strictly increasing position.
		float pos1 = src[i].pos;
		float pos2 = src[i + 1].pos;
		assert(pos2 > pos1);

		DestFloat pos, total_weight;
		float sum_sq_error;
		combine_two_samples(w1, w2, pos1, pos1_pos2_diff, inv_pos1_pos2_diff, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);

		// If the interpolation error is larger than that of about sqrt(2) of
		// a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
		// but since the artifacts are not really random, they can get quite
		// visible. On the other hand, going to 0.25f, I can see no change at
		// all with 8-bit output, so it would not seem to be worth it.)
		if (sum_sq_error > 0.5f / (255.0f * 255.0f)) {
			continue;
		}

		// OK, we can combine this and the next sample.
		// Overwrite the copy made above with the merged tap.
		if (dst != nullptr) {
			dst[j].weight = total_weight;
			dst[j].pos = pos;
		}

		++i;  // Skip the next sample.
		++num_samples_saved;
	}
	return num_samples_saved;
}
+
+// Normalize so that the sum becomes one. Note that we do it twice;
+// this sometimes helps a tiny little bit when we have many samples.
+template<class T>
+void normalize_sum(Tap<T>* vals, unsigned num)
+{
+ for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
+ float sum = 0.0;
+ for (unsigned i = 0; i < num; ++i) {
+ sum += to_fp32(vals[i].weight);
+ }
+ float inv_sum = 1.0 / sum;
+ for (unsigned i = 0; i < num; ++i) {
+ vals[i].weight = from_fp32<T>(to_fp32(vals[i].weight) * inv_sum);
+ }
+ }
+}
+
+// Make use of the bilinear filtering in the GPU to reduce the number of samples
+// we need to make. This is a bit more complex than BlurEffect since we cannot combine
+// two neighboring samples if their weights have differing signs, so we first need to
+// figure out the maximum number of samples. Then, we downconvert all the weights to
+// that number -- we could have gone for a variable-length system, but this is simpler,
+// and the gains would probably be offset by the extra cost of checking when to stop.
+//
+// The greedy strategy for combining samples is optimal.
+template<class DestFloat>
+unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
+{
+ float num_subtexels = src_size / movit_texel_subpixel_precision;
+ float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
+ float pos1_pos2_diff = 1.0f / src_size;
+ float inv_pos1_pos2_diff = src_size;
+
+ unsigned max_samples_saved = UINT_MAX;
+ for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y) {
+ unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, nullptr, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved, pos1_pos2_diff, inv_pos1_pos2_diff);
+ max_samples_saved = min(max_samples_saved, num_samples_saved);
+ }
+
+ // Now that we know the right width, actually combine the samples.
+ unsigned src_bilinear_samples = src_samples - max_samples_saved;
+ bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
+ for (unsigned y = 0; y < dst_samples; ++y) {
+ Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
+ unsigned num_samples_saved = combine_samples(
+ weights + y * src_samples,
+ bilinear_weights_ptr,
+ num_subtexels,
+ inv_num_subtexels,
+ src_samples,
+ max_samples_saved,
+ pos1_pos2_diff,
+ inv_pos1_pos2_diff);
+ assert(num_samples_saved == max_samples_saved);
+ normalize_sum(bilinear_weights_ptr, src_bilinear_samples);