1 // Three-lobed Lanczos, the most common choice.
2 // Note that if you change this, the accuracy for LANCZOS_TABLE_SIZE
3 // needs to be recomputed.
4 #define LANCZOS_RADIUS 3.0f
5
6 #include <epoxy/gl.h>
7 #include <assert.h>
8 #include <limits.h>
9 #include <math.h>
10 #include <stdio.h>
11 #include <algorithm>
12 #include <mutex>
13 #include <Eigen/Sparse>
14 #include <Eigen/SparseQR>
15 #include <Eigen/OrderingMethods>
16
17 #include "effect_chain.h"
18 #include "effect_util.h"
19 #include "fp16.h"
20 #include "init.h"
21 #include "resample_effect.h"
22 #include "util.h"
23
24 using namespace Eigen;
25 using namespace std;
26
27 namespace movit {
28
29 namespace {
30
31 float sinc(float x)
32 {
33         if (fabs(x) < 1e-6) {
34                 return 1.0f - fabs(x);  // sin(x)/x ~= 1 for such small x (well within the 1e-6 tolerance below).
35         } else {
36                 return sin(x) / x;
37         }
38 }
39
40 float lanczos_weight(float x)
41 {
42         if (fabs(x) > LANCZOS_RADIUS) {
43                 return 0.0f;
44         } else {
45                 return sinc(M_PI * x) * sinc((M_PI / LANCZOS_RADIUS) * x);
46         }
47 }
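// A quick sanity check on the kernel shape (illustrative only; it follows
// directly from the definition above):
//
//   assert(fabs(lanczos_weight(0.0f) - 1.0f) < 1e-6f);
//   for (int k = 1; k <= 3; ++k) {
//           assert(fabs(lanczos_weight(float(k))) < 1e-6f);
//   }
//
// In other words, the kernel is interpolating: sampling exactly at the
// original pixel centers reproduces the image unchanged.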
48
49 // The weight function can be expensive to compute over and over again
50 // (which will happen during e.g. a zoom), but it is also easy to interpolate
51 // linearly. We compute the right half of the function (in the range of
52 // 0..LANCZOS_RADIUS), with two guard elements for easier interpolation, and
53 // linearly interpolate to get our function.
54 //
55 // We want to scale the table so that the maximum error is always smaller
56 // than 1e-6. As per http://www-solar.mcs.st-andrews.ac.uk/~clare/Lectures/num-analysis/Numan_chap3.pdf,
57 // the error for interpolating a function linearly between points [a,b] is
58 //
59 //   e = 1/2 (x-a)(x-b) f''(u_x)
60 //
61 // for some point u_x in [a,b] (where f(x) is our Lanczos function; we're
62 // assuming LANCZOS_RADIUS=3 from here on). Obviously this is bounded by
63 // f''(x) over the entire range. Numeric optimization shows the maximum of
64 // |f''(x)| to be in x=1.09369819474562880, with the value 2.40067758733152381.
65 // So if the steps between consecutive values are called d, we get
66 //
67 //   |e| <= 1/2 (d/2)^2 2.4007
68 //   |e| <= 0.3001 d^2
69 //
70 // Solving for e = 1e-6 yields a step size of about 0.0018, which to cover the
71 // range 0..3 needs 1644 steps. We round up to the next power of two, just to be sure.
72 //
73 // You need to run init_lanczos_table() (e.g. via call_once() on
74 // lanczos_table_init_done) before the first call to lanczos_weight_cached.
75 #define LANCZOS_TABLE_SIZE 2048
76 static once_flag lanczos_table_init_done;
77 float lanczos_table[LANCZOS_TABLE_SIZE + 2];
78
79 void init_lanczos_table()
80 {
81         for (unsigned i = 0; i < LANCZOS_TABLE_SIZE + 2; ++i) {
82                 lanczos_table[i] = lanczos_weight(float(i) * (LANCZOS_RADIUS / LANCZOS_TABLE_SIZE));
83         }
84 }
85
86 float lanczos_weight_cached(float x)
87 {
88         x = fabs(x);
89         if (x > LANCZOS_RADIUS) {
90                 return 0.0f;
91         }
92         float table_pos = x * (LANCZOS_TABLE_SIZE / LANCZOS_RADIUS);
93         unsigned table_pos_int = int(table_pos);  // Truncate towards zero.
94         float table_pos_frac = table_pos - table_pos_int;
95         assert(table_pos < LANCZOS_TABLE_SIZE + 2);
96         return lanczos_table[table_pos_int] +
97                 table_pos_frac * (lanczos_table[table_pos_int + 1] - lanczos_table[table_pos_int]);
98 }
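// Illustrative accuracy check (the kind of thing one might put in a unit test;
// it assumes init_lanczos_table() has already run, e.g. via call_once() on
// lanczos_table_init_done):
//
//   for (float x = 0.0f; x < LANCZOS_RADIUS; x += 1e-3f) {
//           assert(fabs(lanczos_weight_cached(x) - lanczos_weight(x)) < 1e-6f);
//   }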
99
100 // Euclid's algorithm, from Wikipedia.
101 unsigned gcd(unsigned a, unsigned b)
102 {
103         while (b != 0) {
104                 unsigned t = b;
105                 b = a % b;
106                 a = t;
107         }
108         return a;
109 }
110
111 template<class DestFloat>
112 unsigned combine_samples(const Tap<float> *src, Tap<DestFloat> *dst, float num_subtexels, float inv_num_subtexels, unsigned num_src_samples, unsigned max_samples_saved, float pos1_pos2_diff, float inv_pos1_pos2_diff)
113 {
114         // Cut off near-zero values at both sides.
115         unsigned num_samples_saved = 0;
116         while (num_samples_saved < max_samples_saved &&
117                num_src_samples > 0 &&
118                fabs(src[0].weight) < 1e-6) {
119                 ++src;
120                 --num_src_samples;
121                 ++num_samples_saved;
122         }
123         while (num_samples_saved < max_samples_saved &&
124                num_src_samples > 0 &&
125                fabs(src[num_src_samples - 1].weight) < 1e-6) {
126                 --num_src_samples;
127                 ++num_samples_saved;
128         }
129
130         for (unsigned i = 0, j = 0; i < num_src_samples; ++i, ++j) {
131                 // Copy the sample directly; it will be overwritten later if we can combine.
132                 if (dst != nullptr) {
133                         dst[j].weight = convert_float<float, DestFloat>(src[i].weight);
134                         dst[j].pos = convert_float<float, DestFloat>(src[i].pos);
135                 }
136
137                 if (i == num_src_samples - 1) {
138                         // Last sample; cannot combine.
139                         continue;
140                 }
141                 assert(num_samples_saved <= max_samples_saved);
142                 if (num_samples_saved == max_samples_saved) {
143                         // We could maybe save more here, but other rows can't, so don't bother.
144                         continue;
145                 }
146
147                 float w1 = src[i].weight;
148                 float w2 = src[i + 1].weight;
149                 if (w1 * w2 < 0.0f) {
150                         // Differing signs; cannot combine.
151                         continue;
152                 }
153
154                 float pos1 = src[i].pos;
155                 float pos2 = src[i + 1].pos;
156                 assert(pos2 > pos1);
157
158                 DestFloat pos, total_weight;
159                 float sum_sq_error;
160                 combine_two_samples(w1, w2, pos1, pos1_pos2_diff, inv_pos1_pos2_diff, num_subtexels, inv_num_subtexels, &pos, &total_weight, &sum_sq_error);
161
162                 // If the interpolation error is larger than about 1/sqrt(2) of
163                 // a level at 8-bit precision, don't combine. (You'd think 1.0 was enough,
164                 // but since the artifacts are not really random, they can get quite
165                 // visible. On the other hand, going to 0.25f, I can see no change at
166                 // all with 8-bit output, so it would not seem to be worth it.)
167                 if (sum_sq_error > 0.5f / (255.0f * 255.0f)) {
168                         continue;
169                 }
170
171                 // OK, we can combine this and the next sample.
172                 if (dst != nullptr) {
173                         dst[j].weight = total_weight;
174                         dst[j].pos = pos;
175                 }
176
177                 ++i;  // Skip the next sample.
178                 ++num_samples_saved;
179         }
180         return num_samples_saved;
181 }
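// The arithmetic behind a single combination, as a simplified sketch of what
// combine_two_samples() does when the subtexel rounding is ignored: two
// same-sign taps w1 at pos1 and w2 at pos2 collapse into one bilinear tap at
// the weighted centroid,
//
//   float total_weight = w1 + w2;
//   float combined_pos = pos1 + (w2 / total_weight) * (pos2 - pos1);
//
// e.g. w1 = 0.3 and w2 = 0.1 become a single tap of weight 0.4 placed 25% of
// the way from pos1 towards pos2; the GPU's bilinear filter then splits that
// tap back into the original 0.3/0.1 contributions (up to subtexel precision,
// which is what sum_sq_error above accounts for).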
182
183 // Normalize so that the sum becomes one. Note that we do it twice;
184 // this sometimes helps a tiny little bit when we have many samples.
185 template<class T>
186 void normalize_sum(Tap<T>* vals, unsigned num)
187 {
188         for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
189                 float sum = 0.0;
190                 for (unsigned i = 0; i < num; ++i) {
191                         sum += to_fp32(vals[i].weight);
192                 }
193                 float inv_sum = 1.0 / sum;
194                 for (unsigned i = 0; i < num; ++i) {
195                         vals[i].weight = from_fp32<T>(to_fp32(vals[i].weight) * inv_sum);
196                 }
197         }
198 }
199
200 template<class T>
201 void normalize_sum(T* vals, unsigned num)
202 {
203         for (int normalize_pass = 0; normalize_pass < 2; ++normalize_pass) {
204                 float sum = 0.0;
205                 for (unsigned i = 0; i < num; ++i) {
206                         sum += to_fp32(vals[i]);
207                 }
208                 float inv_sum = 1.0 / sum;
209                 for (unsigned i = 0; i < num; ++i) {
210                         vals[i] = from_fp32<T>(to_fp32(vals[i]) * inv_sum);
211                 }
212         }
213 }
214
215 // Make use of the bilinear filtering in the GPU to reduce the number of samples
216 // we need to make. This is a bit more complex than BlurEffect since we cannot combine
217 // two neighboring samples if their weights have differing signs, so we first need to
218 // figure out the maximum number of samples. Then, we downconvert all the weights to
219 // that number -- we could have gone for a variable-length system, but this is simpler,
220 // and the gains would probably be offset by the extra cost of checking when to stop.
221 //
222 // The greedy strategy for combining samples is optimal.
223 template<class DestFloat>
224 unsigned combine_many_samples(const Tap<float> *weights, unsigned src_size, unsigned src_samples, unsigned dst_samples, unique_ptr<Tap<DestFloat>[]> *bilinear_weights)
225 {
226         float num_subtexels = src_size / movit_texel_subpixel_precision;
227         float inv_num_subtexels = movit_texel_subpixel_precision / src_size;
228         float pos1_pos2_diff = 1.0f / src_size;
229         float inv_pos1_pos2_diff = src_size;
230
231         unsigned max_samples_saved = UINT_MAX;
232         for (unsigned y = 0; y < dst_samples && max_samples_saved > 0; ++y) {
233                 unsigned num_samples_saved = combine_samples<DestFloat>(weights + y * src_samples, nullptr, num_subtexels, inv_num_subtexels, src_samples, max_samples_saved, pos1_pos2_diff, inv_pos1_pos2_diff);
234                 max_samples_saved = min(max_samples_saved, num_samples_saved);
235         }
236
237         // Now that we know the right width, actually combine the samples.
238         unsigned src_bilinear_samples = src_samples - max_samples_saved;
239         bilinear_weights->reset(new Tap<DestFloat>[dst_samples * src_bilinear_samples]);
240         for (unsigned y = 0; y < dst_samples; ++y) {
241                 Tap<DestFloat> *bilinear_weights_ptr = bilinear_weights->get() + y * src_bilinear_samples;
242                 unsigned num_samples_saved = combine_samples(
243                         weights + y * src_samples,
244                         bilinear_weights_ptr,
245                         num_subtexels,
246                         inv_num_subtexels,
247                         src_samples,
248                         max_samples_saved,
249                         pos1_pos2_diff,
250                         inv_pos1_pos2_diff);
251                 assert(num_samples_saved == max_samples_saved);
252                 normalize_sum(bilinear_weights_ptr, src_bilinear_samples);
253         }
254         return src_bilinear_samples;
255 }
256
257 // Compute the sum of squared errors between the ideal weights (which are
258 // assumed to fall exactly on pixel centers) and the weights that result
259 // from sampling at <bilinear_weights>. The primary reason for the difference
260 // is inaccuracy in the sampling positions, both due to limited precision
261 // in storing them (already inherent in sending them in as fp16_int_t)
262 // and in subtexel sampling precision (which we calculate in this function).
263 template<class T>
264 double compute_sum_sq_error(const Tap<float>* weights, unsigned num_weights,
265                             const Tap<T>* bilinear_weights, unsigned num_bilinear_weights,
266                             unsigned size)
267 {
268         // Find the effective range of the bilinear-optimized kernel.
269         // Due to rounding of the positions, this is not necessarily the same
270         // as the intended range (ie., the range of the original weights).
271         int lower_pos = int(floor(to_fp32(bilinear_weights[0].pos) * size - 0.5f));
272         int upper_pos = int(ceil(to_fp32(bilinear_weights[num_bilinear_weights - 1].pos) * size - 0.5f)) + 2;
273         lower_pos = min<int>(lower_pos, lrintf(weights[0].pos * size - 0.5f));
274         upper_pos = max<int>(upper_pos, lrintf(weights[num_weights - 1].pos * size - 0.5f) + 1);
275
276         float* effective_weights = new float[upper_pos - lower_pos];
277         for (int i = 0; i < upper_pos - lower_pos; ++i) {
278                 effective_weights[i] = 0.0f;
279         }
280
281         // Now find the effective weights that result from this sampling.
282         for (unsigned i = 0; i < num_bilinear_weights; ++i) {
283                 const float pixel_pos = to_fp32(bilinear_weights[i].pos) * size - 0.5f;
284                 const int x0 = int(floor(pixel_pos)) - lower_pos;
285                 const int x1 = x0 + 1;
286                 const float f = lrintf((pixel_pos - (x0 + lower_pos)) / movit_texel_subpixel_precision) * movit_texel_subpixel_precision;
287
288                 assert(x0 >= 0);
289                 assert(x1 >= 0);
290                 assert(x0 < upper_pos - lower_pos);
291                 assert(x1 < upper_pos - lower_pos);
292
293                 effective_weights[x0] += to_fp32(bilinear_weights[i].weight) * (1.0f - f);
294                 effective_weights[x1] += to_fp32(bilinear_weights[i].weight) * f;
295         }
296
297         // Subtract the desired weights to get the error.
298         for (unsigned i = 0; i < num_weights; ++i) {
299                 const int x = lrintf(weights[i].pos * size - 0.5f) - lower_pos;
300                 assert(x >= 0);
301                 assert(x < upper_pos - lower_pos);
302
303                 effective_weights[x] -= weights[i].weight;
304         }
305
306         double sum_sq_error = 0.0;
307         for (unsigned i = 0; i < num_weights; ++i) {
308                 sum_sq_error += effective_weights[i] * effective_weights[i];
309         }
310
311         delete[] effective_weights;
312         return sum_sq_error;
313 }
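// Rough intuition for the magnitudes involved (illustrative, and assuming the
// ideal tap sits exactly on a pixel center): a bilinear tap of weight w whose
// effective sampling position is off by a fraction delta of a pixel splits
// into w * (1 - delta) and w * delta on the two neighboring pixels, so it
// contributes about
//
//   2.0f * w * w * delta * delta
//
// to the sum of squared errors. With w = 0.5 and delta = 1/512 (half a
// subtexel if the subtexel precision happens to be 1/256), that is roughly
// 1.9e-6, comfortably below the 0.5 / (255 * 255) ~= 7.7e-6 threshold used
// when combining, and the 2 / (255 * 255) total budget used further down.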
314
315 }  // namespace
316
317 ResampleEffect::ResampleEffect()
318         : input_width(1280),
319           input_height(720),
320           offset_x(0.0f), offset_y(0.0f),
321           zoom_x(1.0f), zoom_y(1.0f),
322           zoom_center_x(0.5f), zoom_center_y(0.5f)
323 {
324         register_int("width", &output_width);
325         register_int("height", &output_height);
326
327         if (movit_compute_shaders_supported) {
328                 // The effect will forward resolution information to us.
329                 compute_effect_owner.reset(new ResampleComputeEffect(this));
330                 compute_effect = compute_effect_owner.get();
331         } else {
332                 // The first resample pass will forward resolution information to us.
333                 hpass_owner.reset(new SingleResamplePassEffect(this));
334                 hpass = hpass_owner.get();
335                 CHECK(hpass->set_int("direction", SingleResamplePassEffect::HORIZONTAL));
336                 vpass_owner.reset(new SingleResamplePassEffect(this));
337                 vpass = vpass_owner.get();
338                 CHECK(vpass->set_int("direction", SingleResamplePassEffect::VERTICAL));
339         }
340
341         update_size();
342 }
343
344 ResampleEffect::~ResampleEffect()
345 {
346 }
347
348 void ResampleEffect::rewrite_graph(EffectChain *graph, Node *self)
349 {
350         if (compute_effect != nullptr) {
351                 Node *compute_node = graph->add_node(compute_effect_owner.release());
352                 graph->replace_receiver(self, compute_node);
353                 graph->replace_sender(self, compute_node);
354         } else {
355                 Node *hpass_node = graph->add_node(hpass_owner.release());
356                 Node *vpass_node = graph->add_node(vpass_owner.release());
357                 graph->connect_nodes(hpass_node, vpass_node);
358                 graph->replace_receiver(self, hpass_node);
359                 graph->replace_sender(self, vpass_node);
360         }
361         self->disabled = true;
362 }
363
364 // We get this information forwarded from the first resample pass (or from
365 // the compute effect), since we are not part of the chain ourselves.
366 void ResampleEffect::inform_input_size(unsigned input_num, unsigned width, unsigned height)
367 {
368         assert(input_num == 0);
369         assert(width != 0);
370         assert(height != 0);
371         input_width = width;
372         input_height = height;
373         update_size();
374 }
375
376 void ResampleEffect::update_size()
377 {
378         bool ok = true;
379         if (compute_effect != nullptr) {
380                 ok |= compute_effect->set_int("input_width", input_width);
381                 ok |= compute_effect->set_int("input_height", input_height);
382                 ok |= compute_effect->set_int("output_width", output_width);
383                 ok |= compute_effect->set_int("output_height", output_height);
384         } else {
385                 ok |= hpass->set_int("input_width", input_width);
386                 ok |= hpass->set_int("input_height", input_height);
387                 ok |= hpass->set_int("output_width", output_width);
388                 ok |= hpass->set_int("output_height", input_height);
389
390                 ok |= vpass->set_int("input_width", output_width);
391                 ok |= vpass->set_int("input_height", input_height);
392                 ok |= vpass->set_int("output_width", output_width);
393                 ok |= vpass->set_int("output_height", output_height);
394         }
395         assert(ok);
396
397         // The offset added due to zoom may have changed with the size.
398         update_offset_and_zoom();
399 }
400
401 void ResampleEffect::update_offset_and_zoom()
402 {
403         bool ok = true;
404
405         // Zoom from the right origin. (zoom_center is given in normalized coordinates,
406         // i.e. 0..1.)
407         float extra_offset_x = zoom_center_x * (1.0f - 1.0f / zoom_x) * input_width;
408         float extra_offset_y = (1.0f - zoom_center_y) * (1.0f - 1.0f / zoom_y) * input_height;
409
410         if (compute_effect != nullptr) {
411                 ok |= compute_effect->set_float("offset_x", extra_offset_x + offset_x);
412                 ok |= compute_effect->set_float("offset_y", extra_offset_y - offset_y);  // Compensate for the bottom-left origin.
413                 ok |= compute_effect->set_float("zoom_x", zoom_x);
414                 ok |= compute_effect->set_float("zoom_y", zoom_y);
415         } else {
416                 ok |= hpass->set_float("offset", extra_offset_x + offset_x);
417                 ok |= vpass->set_float("offset", extra_offset_y - offset_y);  // Compensate for the bottom-left origin.
418                 ok |= hpass->set_float("zoom", zoom_x);
419                 ok |= vpass->set_float("zoom", zoom_y);
420         }
421
422         assert(ok);
423 }
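// Worked example (illustrative): with input_width = 1280, zoom_x = 2.0 and
// zoom_center_x = 0.5, the extra offset is
//
//   0.5f * (1.0f - 1.0f / 2.0f) * 1280.0f = 320.0f
//
// pixels, i.e. the 640-pixel-wide visible window starts a quarter of the way
// into the source, keeping the magnified image centered on the middle of the
// frame.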
424
425 bool ResampleEffect::set_float(const string &key, float value) {
426         if (key == "width") {
427                 output_width = value;
428                 update_size();
429                 return true;
430         }
431         if (key == "height") {
432                 output_height = value;
433                 update_size();
434                 return true;
435         }
436         if (key == "top") {
437                 offset_y = value;
438                 update_offset_and_zoom();
439                 return true;
440         }
441         if (key == "left") {
442                 offset_x = value;
443                 update_offset_and_zoom();
444                 return true;
445         }
446         if (key == "zoom_x") {
447                 if (value <= 0.0f) {
448                         return false;
449                 }
450                 zoom_x = value;
451                 update_offset_and_zoom();
452                 return true;
453         }
454         if (key == "zoom_y") {
455                 if (value <= 0.0f) {
456                         return false;
457                 }
458                 zoom_y = value;
459                 update_offset_and_zoom();
460                 return true;
461         }
462         if (key == "zoom_center_x") {
463                 zoom_center_x = value;
464                 update_offset_and_zoom();
465                 return true;
466         }
467         if (key == "zoom_center_y") {
468                 zoom_center_y = value;
469                 update_offset_and_zoom();
470                 return true;
471         }
472         return false;
473 }
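// Typical client-side setup, as a sketch (error handling omitted; "chain" is
// a hypothetical EffectChain owned by the caller):
//
//   ResampleEffect *resample = new ResampleEffect;
//   CHECK(resample->set_int("width", 1280));
//   CHECK(resample->set_int("height", 720));
//   CHECK(resample->set_float("zoom_x", 2.0f));
//   CHECK(resample->set_float("zoom_y", 2.0f));
//   CHECK(resample->set_float("zoom_center_x", 0.5f));
//   CHECK(resample->set_float("zoom_center_y", 0.5f));
//   chain->add_effect(resample);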
474
475 SingleResamplePassEffect::SingleResamplePassEffect(ResampleEffect *parent)
476         : parent(parent),
477           direction(HORIZONTAL),
478           input_width(1280),
479           input_height(720),
480           offset(0.0),
481           zoom(1.0),
482           last_input_width(-1),
483           last_input_height(-1),
484           last_output_width(-1),
485           last_output_height(-1),
486           last_offset(0.0 / 0.0),  // NaN.
487           last_zoom(0.0 / 0.0)  // NaN.
488 {
489         register_int("direction", (int *)&direction);
490         register_int("input_width", &input_width);
491         register_int("input_height", &input_height);
492         register_int("output_width", &output_width);
493         register_int("output_height", &output_height);
494         register_float("offset", &offset);
495         register_float("zoom", &zoom);
496         register_uniform_sampler2d("sample_tex", &uniform_sample_tex);
497         register_uniform_int("num_samples", &uniform_num_samples);
498         register_uniform_float("num_loops", &uniform_num_loops);
499         register_uniform_float("slice_height", &uniform_slice_height);
500         register_uniform_float("sample_x_scale", &uniform_sample_x_scale);
501         register_uniform_float("sample_x_offset", &uniform_sample_x_offset);
502         register_uniform_float("whole_pixel_offset", &uniform_whole_pixel_offset);
503
504         call_once(lanczos_table_init_done, init_lanczos_table);
505 }
506
507 SingleResamplePassEffect::~SingleResamplePassEffect()
508 {
509 }
510
511 string SingleResamplePassEffect::output_fragment_shader()
512 {
513         char buf[256];
514         sprintf(buf, "#define DIRECTION_VERTICAL %d\n", (direction == VERTICAL));
515         return buf + read_file("resample_effect.frag");
516 }
517
518 // Using vertical scaling as an example:
519 //
520 // Generally out[y] = w0 * in[yi] + w1 * in[yi + 1] + w2 * in[yi + 2] + ...
521 //
522 // Obviously, yi will depend on y (in a not-quite-linear way), but so will
523 // the weights w0, w1, w2, etc.. The easiest way of doing this is to encode,
524 // for each sample, the weight and the yi value, e.g. <yi, w0>, <yi + 1, w1>,
525 // and so on. For each y, we encode these along the x-axis (since that is spare),
526 // so out[0] will read from parameters <x,y> = <0,0>, <1,0>, <2,0> and so on.
527 //
528 // For horizontal scaling, we fill in the exact same texture;
529 // the shader just interprets it differently.
530 void SingleResamplePassEffect::update_texture(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
531 {
532         unsigned src_size, dst_size;
533         if (direction == SingleResamplePassEffect::HORIZONTAL) {
534                 assert(input_height == output_height);
535                 src_size = input_width;
536                 dst_size = output_width;
537         } else if (direction == SingleResamplePassEffect::VERTICAL) {
538                 assert(input_width == output_width);
539                 src_size = input_height;
540                 dst_size = output_height;
541         } else {
542                 assert(false);
543         }
544
545         ScalingWeights weights = calculate_bilinear_scaling_weights(src_size, dst_size, zoom, offset, BilinearFormatConstraints::ALLOW_FP16_AND_FP32);
546         src_bilinear_samples = weights.src_bilinear_samples;
547         num_loops = weights.num_loops;
548         slice_height = 1.0f / weights.num_loops;
549
550         // Encode as a two-component texture. Note the GL_REPEAT.
551         glActiveTexture(GL_TEXTURE0 + *sampler_num);
552         check_error();
553         glBindTexture(GL_TEXTURE_2D, tex.get_texnum());
554         check_error();
555
556         GLenum type, internal_format;
557         void *pixels;
558         assert((weights.bilinear_weights_fp16 == nullptr) != (weights.bilinear_weights_fp32 == nullptr));
559         if (weights.bilinear_weights_fp32 != nullptr) {
560                 type = GL_FLOAT;
561                 internal_format = GL_RG32F;
562                 pixels = weights.bilinear_weights_fp32.get();
563         } else {
564                 type = GL_HALF_FLOAT;
565                 internal_format = GL_RG16F;
566                 pixels = weights.bilinear_weights_fp16.get();
567         }
568
569         tex.update(weights.src_bilinear_samples, weights.dst_samples, internal_format, GL_RG, type, pixels);
570 }
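// For reference, this is what the fragment shader effectively computes for
// one output row/column y (a CPU-side sketch; "row" and "sample_input()" are
// stand-ins for the weight-texture row and the bilinear source fetch, not
// real functions in this file):
//
//   float acc = 0.0f;
//   for (int i = 0; i < src_bilinear_samples; ++i) {
//           Tap<float> tap = row[i];  // fp16 or fp32, depending on what update_texture() chose
//           acc += tap.weight * sample_input(tap.pos);
//   }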
571
572 ResampleComputeEffect::ResampleComputeEffect(ResampleEffect *parent)
573         : parent(parent),
574           input_width(1280),
575           input_height(720),
576           offset_x(0.0),
577           offset_y(0.0),
578           zoom_x(1.0),
579           zoom_y(1.0),
580           last_input_width(-1),
581           last_input_height(-1),
582           last_output_width(-1),
583           last_output_height(-1),
584           last_offset_x(0.0 / 0.0),  // NaN.
585           last_offset_y(0.0 / 0.0),  // NaN.
586           last_zoom_x(0.0 / 0.0),  // NaN.
587           last_zoom_y(0.0 / 0.0)  // NaN.
588 {
589         register_int("input_width", &input_width);
590         register_int("input_height", &input_height);
591         register_int("output_width", &output_width);
592         register_int("output_height", &output_height);
593         register_float("offset_x", &offset_x);
594         register_float("offset_y", &offset_y);
595         register_float("zoom_x", &zoom_x);
596         register_float("zoom_y", &zoom_y);
597         register_uniform_sampler2d("sample_tex_horizontal", &uniform_sample_tex_horizontal);
598         register_uniform_sampler2d("sample_tex_vertical", &uniform_sample_tex_vertical);
599         register_uniform_int("num_horizontal_samples", &uniform_num_horizontal_samples);
600         register_uniform_int("num_vertical_samples", &uniform_num_vertical_samples);
601         register_uniform_int("vertical_int_radius", &uniform_vertical_int_radius);
602         register_uniform_float("inv_vertical_scaling_factor", &uniform_inv_vertical_scaling_factor);
603         register_uniform_int("output_samples_per_block", &uniform_output_samples_per_block);
604         register_uniform_int("num_horizontal_filters", &uniform_num_horizontal_filters);
605         register_uniform_int("num_vertical_filters", &uniform_num_vertical_filters);
606         register_uniform_float("slice_height", &uniform_slice_height);
607         register_uniform_float("horizontal_whole_pixel_offset", &uniform_horizontal_whole_pixel_offset);
608         register_uniform_int("vertical_whole_pixel_offset", &uniform_vertical_whole_pixel_offset);
609         register_uniform_float("inv_input_height", &uniform_inv_input_height);
610         register_uniform_float("input_texcoord_y_adjust", &uniform_input_texcoord_y_adjust);
611
612         call_once(lanczos_table_init_done, init_lanczos_table);
613 }
614
615 ResampleComputeEffect::~ResampleComputeEffect()
616 {
617 }
618
619 string ResampleComputeEffect::output_fragment_shader()
620 {
621         char buf[256] = "";
622         return buf + read_file("resample_effect.comp");
623 }
624
625 // The compute shader does horizontal scaling first, using exactly the same
626 // two-component texture format as in the two-pass version (see the comments
627 // on ResampleComputeEffect). The vertical scaling calculates the offset values
628 // in the shader, so we only store a one-component texture with the weights
629 // for each filter.
630 void ResampleComputeEffect::update_texture(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
631 {
632         ScalingWeights horiz_weights = calculate_bilinear_scaling_weights(input_width, output_width, zoom_x, offset_x, BilinearFormatConstraints::ALLOW_FP32_ONLY);
633         ScalingWeights vert_weights = calculate_raw_scaling_weights(input_height, output_height, zoom_y, offset_y);
634         uniform_vertical_int_radius = vert_weights.int_radius;
635         vertical_scaling_factor = vert_weights.scaling_factor;
636         uniform_inv_vertical_scaling_factor = 1.0f / vert_weights.scaling_factor;
637         src_horizontal_bilinear_samples = horiz_weights.src_bilinear_samples;
638         src_vertical_samples = vert_weights.src_bilinear_samples;
639         uniform_num_horizontal_filters = horiz_weights.dst_samples;
640         uniform_num_vertical_filters = vert_weights.dst_samples;
641         slice_height = 1.0f / horiz_weights.num_loops;
642
643         // Encode as a two-component texture. Note the GL_REPEAT.
644         glActiveTexture(GL_TEXTURE0 + *sampler_num);
645         check_error();
646         glBindTexture(GL_TEXTURE_2D, tex_horiz.get_texnum());
647         check_error();
648
649         tex_horiz.update(horiz_weights.src_bilinear_samples, horiz_weights.dst_samples, GL_RG32F, GL_RG, GL_FLOAT, horiz_weights.bilinear_weights_fp32.get());
650
651         glActiveTexture(GL_TEXTURE0 + *sampler_num + 1);
652         check_error();
653         glBindTexture(GL_TEXTURE_2D, tex_vert.get_texnum());
654         check_error();
655
656         // Storing the vertical weights as fp16 instead of fp32 saves a few
657         // percent on NVIDIA, and it doesn't seem to hurt quality any.
658         // (The horizontal weights are a different story, since the offsets
659         // can get large and are fairly accuracy-sensitive. Also, they are
660         // loaded only once per workgroup, at the very beginning.)
661         tex_vert.update(vert_weights.src_bilinear_samples, vert_weights.dst_samples, GL_R16F, GL_RED, GL_HALF_FLOAT, vert_weights.raw_weights.get());
662
663         // Figure out how many output samples each compute shader block is going to output.
664         int usable_input_samples_per_block = 128 - 2 * uniform_vertical_int_radius;
665         int output_samples_per_block = int(floor(usable_input_samples_per_block * vertical_scaling_factor));
666         if (output_samples_per_block < 1) {
667                 output_samples_per_block = 1;
668         }
669         uniform_output_samples_per_block = output_samples_per_block;
670 }
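// Worked example (illustrative): a 2160 -> 1080 vertical downscale at zoom 1
// gives a scaling factor of 0.5, so int_radius = lrintf(3.0 / 0.5) = 6,
// usable_input_samples_per_block = 128 - 2 * 6 = 116, and
// output_samples_per_block = floor(116 * 0.5) = 58.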
671
672 namespace {
673
674 ScalingWeights calculate_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
675 {
676         // Only needed if run from outside ResampleEffect.
677         call_once(lanczos_table_init_done, init_lanczos_table);
678
679         // For many resamplings (e.g. 640 -> 1280), we will end up with the same
680         // set of samples over and over again in a loop. Thus, we can compute only
681         // the first such loop, and then ask the card to repeat the texture for us.
682         // This is both easier on the texture cache and lowers our CPU cost for
683         // generating the kernel somewhat.
684         float scaling_factor;
685         int num_loops;
686         if (fabs(zoom - 1.0f) < 1e-6) {
687                 num_loops = gcd(src_size, dst_size);
688                 scaling_factor = float(dst_size) / float(src_size);
689         } else {
690                 // If zooming is enabled (ie., zoom != 1), we turn off the looping.
691                 // We _could_ perhaps do it for rational zoom levels (especially
692                 // things like 2:1), but it doesn't seem to be worth it, given that
693                 // the most common use case would seem to be varying the zoom
694                 // from frame to frame.
695                 num_loops = 1;
696                 scaling_factor = zoom * float(dst_size) / float(src_size);
697         }
698         unsigned dst_samples = dst_size / num_loops;
699
700         // Sample the kernel in the right place. A diagram with a triangular kernel
701         // (corresponding to linear filtering, and obviously with radius 1)
702         // for easier ASCII art drawing:
703         //
704         //                *
705         //               / \                      |
706         //              /   \                     |
707         //             /     \                    |
708         //    x---x---x   x   x---x---x---x
709         //
710         // Scaling up (in this case, 2x) means sampling more densely:
711         //
712         //                *
713         //               / \                      |
714         //              /   \                     |
715         //             /     \                    |
716         //   x-x-x-x-x-x x x x-x-x-x-x-x-x-x
717         //
718         // When scaling up, any destination pixel will only be influenced by a few
719         // (in this case, two) neighboring pixels, and more importantly, the number
720         // will not be influenced by the scaling factor. (Note, however, that the
721         // pixel centers have moved, due to OpenGL's center-pixel convention.)
722         // The only thing that changes is the weights themselves, as the sampling
723         // points are at different distances from the original pixels.
724         //
725         // Scaling down is a different story:
726         //
727         //                *
728         //               / \                      |
729         //              /   \                     |
730         //             /     \                    |
731         //    --x------ x     --x-------x--
732         //
733         // Again, the pixel centers have moved in a maybe unintuitive fashion,
734         // although when you consider that there are multiple source pixels around,
735         // it's not so bad as at first look:
736         //
737         //            *   *   *   *
738         //           / \ / \ / \ / \              |
739         //          /   X   X   X   \             |
740         //         /   / \ / \ / \   \            |
741         //    --x-------x-------x-------x--
742         //
743         // As you can see, the new pixels become averages of the two neighboring old
744         // ones (the situation for Lanczos is of course more complex).
745         //
746         // Anyhow, in this case we clearly need to look at more source pixels
747         // to compute the destination pixel, and how many depend on the scaling factor.
748         // Thus, the kernel width will vary with how much we scale.
749         float radius_scaling_factor = min(scaling_factor, 1.0f);
750         const int int_radius = lrintf(LANCZOS_RADIUS / radius_scaling_factor);
751         const int src_samples = int_radius * 2 + 1;
752         unique_ptr<Tap<float>[]> weights(new Tap<float>[dst_samples * src_samples]);
753         float subpixel_offset = offset - lrintf(offset);  // The part not covered by whole_pixel_offset.
754         assert(subpixel_offset >= -0.5f && subpixel_offset <= 0.5f);
755         float inv_scaling_factor = 1.0f / scaling_factor;
756         for (unsigned y = 0; y < dst_samples; ++y) {
757                 // Find the point around which we want to sample the source image,
758                 // compensating for differing pixel centers as the scale changes.
759                 float center_src_y = (y + 0.5f) * inv_scaling_factor - 0.5f;
760                 int base_src_y = lrintf(center_src_y);
761
762                 // Now sample <int_radius> pixels on each side around that point.
763                 float inv_src_size = 1.0 / float(src_size);
764                 for (int i = 0; i < src_samples; ++i) {
765                         int src_y = base_src_y + i - int_radius;
766                         float weight = lanczos_weight_cached(radius_scaling_factor * (src_y - center_src_y - subpixel_offset));
767                         weights[y * src_samples + i].weight = weight * radius_scaling_factor;
768                         weights[y * src_samples + i].pos = (src_y + 0.5f) * inv_src_size;
769                 }
770         }
771
772         ScalingWeights ret;
773         ret.src_bilinear_samples = src_samples;
774         ret.dst_samples = dst_samples;
775         ret.int_radius = int_radius;
776         ret.scaling_factor = scaling_factor;
777         ret.num_loops = num_loops;
778         ret.bilinear_weights_fp16 = nullptr;
779         ret.bilinear_weights_fp32 = move(weights);
780         ret.raw_weights = nullptr;
781         return ret;
782 }
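// Worked example (illustrative): a 640 -> 1280 upscale at zoom 1 gives
// num_loops = gcd(640, 1280) = 640 and dst_samples = 1280 / 640 = 2, so only
// two filter phases are actually computed; the weight texture then repeats
// 640 times across the output thanks to GL_REPEAT. The scaling factor is 2.0,
// so radius_scaling_factor = 1.0, int_radius = 3 and src_samples = 7, for a
// total of 2 * 7 = 14 precomputed taps.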
783
784 }  // namespace
785
786 ScalingWeights calculate_bilinear_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset, BilinearFormatConstraints constraints)
787 {
788         ScalingWeights ret = calculate_scaling_weights(src_size, dst_size, zoom, offset);
789         unique_ptr<Tap<float>[]> weights = move(ret.bilinear_weights_fp32);
790         const int src_samples = ret.src_bilinear_samples;
791
792         // Now make use of the bilinear filtering in the GPU to reduce the number of samples
793         // we need to make. Try fp16 first; if it's not accurate enough, we go to fp32.
794         // Our tolerance level for total error is a bit higher than the one for individual
795         // samples, since one would assume overall errors in the shape don't matter as much.
796         const float max_error = 2.0f / (255.0f * 255.0f);
797         unique_ptr<Tap<fp16_int_t>[]> bilinear_weights_fp16;
798         unique_ptr<Tap<float>[]> bilinear_weights_fp32;
799         double max_sum_sq_error_fp16 = 0.0;
800         int src_bilinear_samples;
801         if (constraints == BilinearFormatConstraints::ALLOW_FP32_ONLY) {
802                 max_sum_sq_error_fp16 = numeric_limits<double>::max();
803         } else {
804                 assert(constraints == BilinearFormatConstraints::ALLOW_FP16_AND_FP32);
805                 src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, ret.dst_samples, &bilinear_weights_fp16);
806                 for (unsigned y = 0; y < ret.dst_samples; ++y) {
807                         double sum_sq_error_fp16 = compute_sum_sq_error(
808                                 weights.get() + y * src_samples, src_samples,
809                                 bilinear_weights_fp16.get() + y * src_bilinear_samples, src_bilinear_samples,
810                                 src_size);
811                         max_sum_sq_error_fp16 = std::max(max_sum_sq_error_fp16, sum_sq_error_fp16);
812                         if (max_sum_sq_error_fp16 > max_error) {
813                                 break;
814                         }
815                 }
816         }
817
818         if (max_sum_sq_error_fp16 > max_error) {
819                 bilinear_weights_fp16.reset();
820                 src_bilinear_samples = combine_many_samples(weights.get(), src_size, src_samples, ret.dst_samples, &bilinear_weights_fp32);
821         }
822
823         ret.src_bilinear_samples = src_bilinear_samples;
824         ret.bilinear_weights_fp16 = move(bilinear_weights_fp16);
825         ret.bilinear_weights_fp32 = move(bilinear_weights_fp32);
826         return ret;
827 }
828
829 // Unlike calculate_bilinear_scaling_weights(), this just converts the weights,
830 // without any combining trickery. Thus, it is also much faster.
831 ScalingWeights calculate_raw_scaling_weights(unsigned src_size, unsigned dst_size, float zoom, float offset)
832 {
833         ScalingWeights ret = calculate_scaling_weights(src_size, dst_size, zoom, offset);
834         unique_ptr<Tap<float>[]> weights = move(ret.bilinear_weights_fp32);
835         const int src_samples = ret.src_bilinear_samples;
836
837         // Convert to fp16 (without any positions, as they are calculated implicitly
838         // by the compute shader) and normalize.
839         unique_ptr<fp16_int_t[]> raw_weights(new fp16_int_t[ret.dst_samples * src_samples]);
840         for (unsigned y = 0; y < ret.dst_samples; ++y) {
841                 for (int i = 0; i < src_samples; ++i) {
842                         raw_weights[y * src_samples + i] = fp32_to_fp16(weights[y * src_samples + i].weight);
843                 }
844                 normalize_sum(raw_weights.get() + y * src_samples, src_samples);
845         }
846
847         ret.raw_weights = move(raw_weights);
848         return ret;
849 }
850
851 void SingleResamplePassEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
852 {
853         Effect::set_gl_state(glsl_program_num, prefix, sampler_num);
854
855         assert(input_width > 0);
856         assert(input_height > 0);
857         assert(output_width > 0);
858         assert(output_height > 0);
859
860         if (input_width != last_input_width ||
861             input_height != last_input_height ||
862             output_width != last_output_width ||
863             output_height != last_output_height ||
864             offset != last_offset ||
865             zoom != last_zoom) {
866                 update_texture(glsl_program_num, prefix, sampler_num);
867                 last_input_width = input_width;
868                 last_input_height = input_height;
869                 last_output_width = output_width;
870                 last_output_height = output_height;
871                 last_offset = offset;
872                 last_zoom = zoom;
873         }
874
875         glActiveTexture(GL_TEXTURE0 + *sampler_num);
876         check_error();
877         glBindTexture(GL_TEXTURE_2D, tex.get_texnum());
878         check_error();
879
880         uniform_sample_tex = *sampler_num;
881         ++*sampler_num;
882         uniform_num_samples = src_bilinear_samples;
883         uniform_num_loops = num_loops;
884         uniform_slice_height = slice_height;
885
886         // Instructions for how to convert integer sample numbers to positions in the weight texture.
887         uniform_sample_x_scale = 1.0f / src_bilinear_samples;
888         uniform_sample_x_offset = 0.5f / src_bilinear_samples;
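        // For example (purely illustrative): with src_bilinear_samples = 4,
        // sample i maps to x = i * 0.25f + 0.125f, i.e. the texel centers
        // 0.125, 0.375, 0.625 and 0.875.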
889
890         if (direction == SingleResamplePassEffect::VERTICAL) {
891                 uniform_whole_pixel_offset = lrintf(offset) / float(input_height);
892         } else {
893                 uniform_whole_pixel_offset = lrintf(offset) / float(input_width);
894         }
895 }
896
897 Support2DTexture::Support2DTexture()
898 {
899         glGenTextures(1, &texnum);
900         check_error();
901         glBindTexture(GL_TEXTURE_2D, texnum);
902         check_error();
903         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
904         check_error();
905         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
906         check_error();
907         glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
908         check_error();
909 }
910
911 Support2DTexture::~Support2DTexture()
912 {
913         glDeleteTextures(1, &texnum);
914         check_error();
915 }
916
917 void Support2DTexture::update(GLint width, GLint height, GLenum internal_format, GLenum format, GLenum type, const GLvoid * data)
918 {
919         glBindTexture(GL_TEXTURE_2D, texnum);
920         check_error();
921         if (width == last_texture_width &&
922             height == last_texture_height &&
923             internal_format == last_texture_internal_format) {
924                 // Texture dimensions and type are unchanged; it is more efficient
925                 // to just update it rather than making an entirely new texture.
926                 glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, data);
927                 check_error();
928         } else {
929                 glTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, type, data);
930                 check_error();
931                 last_texture_width = width;
932                 last_texture_height = height;
933                 last_texture_internal_format = internal_format;
934         }
935 }
936
937 void ResampleComputeEffect::get_compute_dimensions(unsigned output_width, unsigned output_height,
938                                                    unsigned *x, unsigned *y, unsigned *z) const
939 {
940         *x = output_width;
941         *y = (output_height + uniform_output_samples_per_block - 1) / uniform_output_samples_per_block;
942         *z = 1;
943 }
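// Illustrative: with a 1920x1080 output and output_samples_per_block = 58
// (see the worked example after update_texture() above), this dispatches
// (1920, 19, 1) work groups, since (1080 + 57) / 58 = 19.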
944
945 void ResampleComputeEffect::set_gl_state(GLuint glsl_program_num, const string &prefix, unsigned *sampler_num)
946 {
947         Effect::set_gl_state(glsl_program_num, prefix, sampler_num);
948
949         assert(input_width > 0);
950         assert(input_height > 0);
951         assert(output_width > 0);
952         assert(output_height > 0);
953
954         if (input_width != last_input_width ||
955             input_height != last_input_height ||
956             output_width != last_output_width ||
957             output_height != last_output_height ||
958             offset_x != last_offset_x ||
959             offset_y != last_offset_y ||
960             zoom_x != last_zoom_x ||
961             zoom_y != last_zoom_y) {
962                 update_texture(glsl_program_num, prefix, sampler_num);
963                 last_input_width = input_width;
964                 last_input_height = input_height;
965                 last_output_width = output_width;
966                 last_output_height = output_height;
967                 last_offset_x = offset_x;
968                 last_offset_y = offset_y;
969                 last_zoom_x = zoom_x;
970                 last_zoom_y = zoom_y;
971         }
972
973         glActiveTexture(GL_TEXTURE0 + *sampler_num);
974         check_error();
975         glBindTexture(GL_TEXTURE_2D, tex_horiz.get_texnum());
976         check_error();
977         uniform_sample_tex_horizontal = *sampler_num;
978         ++*sampler_num;
979
980         glActiveTexture(GL_TEXTURE0 + *sampler_num);
981         check_error();
982         glBindTexture(GL_TEXTURE_2D, tex_vert.get_texnum());
983         check_error();
984         uniform_sample_tex_vertical = *sampler_num;
985         ++*sampler_num;
986
987         uniform_num_horizontal_samples = src_horizontal_bilinear_samples;
988         uniform_num_vertical_samples = src_vertical_samples;
989         uniform_slice_height = slice_height;
990
991         uniform_horizontal_whole_pixel_offset = lrintf(offset_x) / float(input_width);
992         uniform_vertical_whole_pixel_offset = lrintf(offset_y);
993
994         uniform_inv_input_height = 1.0f / float(input_height);
995         uniform_input_texcoord_y_adjust = 0.5f / float(input_height);
996 }
997
998 }  // namespace movit