+ vec2 image_size = textureSize(grad_tex, 0).xy;
+
+ // Lock the patch center to an integer, so that we never get
+ // any bilinear artifacts for the gradient. (NOTE: This assumes an
+ // even patch size.) Then calculate the bottom-left texel of the patch.
+ vec2 base = (round(patch_center * image_size) - (0.5f * patch_size - 0.5f))
+ 	* inv_image_size;
+
+ // First, precompute the pseudo-Hessian for the template patch.
+ // This is the part where we really save by the inverse search
+ // (ie., we can compute it up-front instead of anew for each
+ // patch).
+ //
+ //  H = sum(S^T S)
+ //
+ // where S is the gradient at each point in the patch. Note that
+ // this is an outer product, so we get a (symmetric) 2x2 matrix,
+ // not a scalar.
+ mat2 H = mat2(0.0f);
+ vec2 grad_sum = vec2(0.0f);  // Used for patch normalization.
+ float template_sum = 0.0f;
+ for (uint y = 0; y < patch_size; ++y) {
+ 	for (uint x = 0; x < patch_size; ++x) {
+ 		// uvec2 * vec2 promotes componentwise to vec2, so tc is the
+ 		// normalized texture coordinate of texel (x, y) within the patch.
+ 		vec2 tc = base + uvec2(x, y) * inv_image_size;
+ 		// get_gradients packs (d/dx, d/dy, pixel value) into one fetch.
+ 		vec3 grad = get_gradients(vec3(tc, ref_layer));
+ 		H[0][0] += grad.x * grad.x;
+ 		H[1][1] += grad.y * grad.y;
+ 		H[0][1] += grad.x * grad.y;
+
+ 		template_sum += grad.z;  // The actual template pixel value.
+ 		grad_sum += grad.xy;
+ 	}
+ }
+ H[1][0] = H[0][1];
+
+ // Make sure we don't get a singular matrix even if e.g. the picture is
+ // all black. (The paper doesn't mention this, but the reference code
+ // does it, and it seems like a reasonable hack to avoid NaNs. With such
+ // a H, we'll go out-of-bounds pretty soon, though.)
+ if (determinant(H) < 1e-6) {
+ 	H[0][0] += 1e-6;
+ 	H[1][1] += 1e-6;
+ }
+
+ mat2 H_inv = inverse(H);
+
+ // Fetch the initial guess for the flow, and convert from the previous size to this one.
+ vec2 initial_u = texture(flow_tex, flow_tc).xy * (image_size * inv_prev_level_size);
+ vec2 u = initial_u;
+ // NOTE(review): first_mean_diff is read in the rejection branch below but is
+ // only written on iteration 0 — confirm num_iterations is always >= 1.
+ float mean_diff, first_mean_diff;
+
+ // Inverse-search iterations: the Jacobian/Hessian stay fixed (computed on the
+ // template above); only the warped patch is re-sampled each iteration.
+ for (uint i = 0; i < num_iterations; ++i) {
+ 	vec2 du = vec2(0.0, 0.0);
+ 	float warped_sum = 0.0f;
+ 	vec2 u_norm = u * inv_image_size;  // In [0..1] coordinates instead of pixels.
+ 	for (uint y = 0; y < patch_size; ++y) {
+ 		for (uint x = 0; x < patch_size; ++x) {
+ 			vec2 tc = base + uvec2(x, y) * inv_image_size;
+ 			vec3 grad = get_gradients(vec3(tc, ref_layer));
+ 			float t = grad.z;
+ 			// Sample the search image at the same patch position, displaced by
+ 			// the current flow estimate (bilinear, since u is non-integer).
+ 			float warped = texture(image_tex, vec3(tc + u_norm, search_layer)).x;
+ 			du += grad.xy * (warped - t);
+ 			warped_sum += warped;
+ 		}
+ 	}
+
+ 	// Subtract the mean for patch normalization. We've done our
+ 	// sums without subtracting the means (because we didn't know them
+ 	// beforehand), ie.:
+ 	//
+ 	//   sum(S^T * ((x + µ1) - (y + µ2))) = sum(S^T * (x - y)) + (µ1 - µ2) sum(S^T)
+ 	//
+ 	// which gives trivially
+ 	//
+ 	//   sum(S^T * (x - y)) = [what we calculated] - (µ1 - µ2) sum(S^T)
+ 	//
+ 	// so we can just subtract away the mean difference here.
+ 	mean_diff = (warped_sum - template_sum) * (1.0 / float(patch_size * patch_size));
+ 	du -= grad_sum * mean_diff;
+
+ 	if (i == 0) {
+ 		first_mean_diff = mean_diff;
+ 	}
+
+ 	// Do the actual update.
+ 	u -= H_inv * du;
+ }
+
+ // Reject if we moved too far. Note that the paper says “too far” is the
+ // patch size, but the DIS code uses half of a patch size. The latter seems
+ // to give much better overall results.
+ //
+ // Also reject if the patch goes out-of-bounds (the paper does not mention this,
+ // but the code does, and it seems to be critical to avoid really bad behavior
+ // at the edges).
+ //
+ // base * image_size - 0.5f + patch_size * 0.5f collapses back to the rounded
+ // integer patch center from the top of this pass (the half-texel terms cancel),
+ // so patch_center here is that center displaced by the final flow u.
+ // NOTE(review): this declaration reuses the name patch_center — presumably it
+ // shadows a variable declared outside this excerpt (used above at the round());
+ // confirm against the full shader, since a same-scope redeclaration would not compile.
+ vec2 patch_center = (base * image_size - 0.5f) + patch_size * 0.5f + u;
+ if (length(u - initial_u) > (patch_size * 0.5f) ||
+     patch_center.x < -(patch_size * 0.5f) ||
+     image_size.x - patch_center.x < -(patch_size * 0.5f) ||
+     patch_center.y < -(patch_size * 0.5f) ||
+     image_size.y - patch_center.y < -(patch_size * 0.5f)) {
+ 	u = initial_u;
+ 	mean_diff = first_mean_diff;
+ }
+
+ // NOTE: The mean patch diff will be for the second-to-last patch,
+ // not the true position of du. But hopefully, it will be very close.
+ // Convert u back from pixels to normalized [0..1] coordinates and emit
+ // (flow.x, flow.y, mean brightness difference) for the next pass.
+ u *= inv_image_size;
+ out_flow = vec3(u.x, u.y, mean_diff);