out vec2 out_flow;
uniform sampler2D flow_tex, grad0_tex, image0_tex, image1_tex;
-uniform float image_width, image_height, inv_image_width, inv_image_height;
+uniform vec2 image_size, inv_image_size;
void main()
{
// Lock patch_bottom_left_texel to an integer, so that we never get
// any bilinear artifacts for the gradient.
- vec2 base = round(patch_bottom_left_texel * vec2(image_width, image_height))
- * vec2(inv_image_width, inv_image_height);
+ vec2 base = round(patch_bottom_left_texel * image_size)
+ * inv_image_size;
// First, precompute the pseudo-Hessian for the template patch.
 	// This is the part where we really save by the inverse search;
// this is an outer product, so we get a (symmetric) 2x2 matrix,
// not a scalar.
mat2 H = mat2(0.0f);
+ vec2 grad_sum = vec2(0.0f); // Used for patch normalization.
+ float template_sum = 0.0f;
for (uint y = 0; y < patch_size; ++y) {
for (uint x = 0; x < patch_size; ++x) {
- vec2 tc;
- tc.x = base.x + x * inv_image_width;
- tc.y = base.y + y * inv_image_height;
+ vec2 tc = base + uvec2(x, y) * inv_image_size;
vec2 grad = texture(grad0_tex, tc).xy;
H[0][0] += grad.x * grad.x;
H[1][1] += grad.y * grad.y;
H[0][1] += grad.x * grad.y;
+
+ template_sum += texture(image0_tex, tc).x;
+ grad_sum += grad;
}
}
H[1][0] = H[0][1];
mat2 H_inv = inverse(H);
- // Fetch the initial guess for the flow.
- vec2 initial_u = texture(flow_tex, flow_tc).xy;
+ // Fetch the initial guess for the flow. (We need the normalization step
+ // because densification works by accumulating; see the comments on the
+ // Densify class.)
+ vec3 prev_flow = texture(flow_tex, flow_tc).xyz;
+ vec2 initial_u;
+ if (prev_flow.z < 1e-3) {
+ initial_u = vec2(0.0, 0.0);
+ } else {
+ initial_u = prev_flow.xy / prev_flow.z;
+ }
+
+ // Note: The flow is in OpenGL coordinates [0..1], but the calculations
+ // generally come out in pixels since the gradient is in pixels,
+ // so we need to convert at the end.
vec2 u = initial_u;
for (uint i = 0; i < num_iterations; ++i) {
vec2 du = vec2(0.0, 0.0);
+ float warped_sum = 0.0f;
for (uint y = 0; y < patch_size; ++y) {
for (uint x = 0; x < patch_size; ++x) {
- vec2 tc;
- tc.x = base.x + x * inv_image_width;
- tc.y = base.y + y * inv_image_height;
+ vec2 tc = base + uvec2(x, y) * inv_image_size;
vec2 grad = texture(grad0_tex, tc).xy;
float t = texture(image0_tex, tc).x;
float warped = texture(image1_tex, tc + u).x;
du += grad * (warped - t);
+ warped_sum += warped;
}
}
- u += H_inv * du * vec2(inv_image_width, inv_image_height);
+
+ // Subtract the mean for patch normalization. We've done our
+ // sums without subtracting the means (because we didn't know them
+ // beforehand), ie.:
+ //
+	//     sum(S^T * ((x + µ1) - (y + µ2))) = sum(S^T * (x - y)) + (µ1 - µ2) sum(S^T)
+ //
+ // which gives trivially
+ //
+ // sum(S^T * (x - y)) = [what we calculated] - (µ1 - µ2) sum(S^T)
+ //
+ // so we can just subtract away the mean difference here.
+ du -= grad_sum * (warped_sum - template_sum) * (1.0 / (patch_size * patch_size));
+
+ // Do the actual update.
+ u -= (H_inv * du) * inv_image_size;
}
- // Reject if we moved too far.
- if (length((u - initial_u) * vec2(image_width, image_height)) > patch_size) {
+ // Reject if we moved too far. Also reject if the patch goes out-of-bounds
+ // (the paper does not mention this, but the code does, and it seems to be
+ // critical to avoid really bad behavior at the edges).
+ if ((length((u - initial_u) * image_size) > patch_size) ||
+ u.x * image_size.x < -(patch_size * 0.5f) ||
+ (1.0 - u.x) * image_size.x < -(patch_size * 0.5f) ||
+ u.y * image_size.y < -(patch_size * 0.5f) ||
+ (1.0 - u.y) * image_size.y < -(patch_size * 0.5f)) {
u = initial_u;
}