X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=sor.frag;h=9a8e1e40aa0a32490091c5e2eb7b1b671ad3ff2e;hb=3795723be95f2fe82f3c8b8b45b1a905b2c811fd;hp=047526cb0899a32db07d9e8c143578b03a3e09e9;hpb=20989aaee7ca26bb7aede452c55597fbcf26ebb8;p=nageru

diff --git a/sor.frag b/sor.frag
index 047526c..9a8e1e4 100644
--- a/sor.frag
+++ b/sor.frag
@@ -1,15 +1,15 @@
 #version 450 core
 
-in vec2 tc, tc_left, tc_down;
-in vec2 equation_tc_assuming_left, equation_tc_assuming_right;
+in vec3 tc, tc_left, tc_down;
+in vec3 equation_tc_assuming_left, equation_tc_assuming_right;
 in float element_x_idx, element_sum_idx;
 out vec2 diff_flow;
 
-uniform sampler2D diff_flow_tex, diffusivity_tex;
-uniform usampler2D equation_red_tex, equation_black_tex;
+uniform sampler2DArray diff_flow_tex, diffusivity_tex;
+uniform usampler2DArray equation_red_tex, equation_black_tex;
 uniform int phase;
 
-uniform bool zero_diff_flow;
+uniform int num_nonzero_phases;
 
 // See pack_floats_shared() in equations.frag.
 vec2 unpack_floats_shared(uint c)
@@ -45,13 +45,14 @@ void main()
 	// just immediately throws away half of the warp, but it helps convergence
 	// a _lot_ (rough testing indicates that five iterations of SOR is as good
 	// as ~50 iterations of Jacobi). We could probably do better by reorganizing
-	// the data into two-values-per-pixel, so-called “twinning buffering”,
-	// but it makes for rather annoying code in the rest of the pipeline.
+	// the data into two-values-per-pixel, so-called “twinned buffering”;
+	// seemingly, it helps Haswell by ~15% on the SOR code, but GTX 950 not at all
+	// (at least not on 720p). Presumably the latter is already bandwidth bound.
 	int color = int(round(element_sum_idx)) & 1;
 	if (color != phase) discard;
 
 	uvec4 equation;
-	vec2 equation_tc;
+	vec3 equation_tc;
 	if ((int(round(element_x_idx)) & 1) == 0) {
 		equation_tc = equation_tc_assuming_left;
 	} else {
@@ -67,8 +68,12 @@ void main()
 	float inv_A22 = uintBitsToFloat(equation.z);
 	vec2 b = unpack_floats_shared(equation.w);
 
-	if (zero_diff_flow) {
-		diff_flow = vec2(0.0f);
+	const float omega = 1.8;  // Marginally better than 1.6, it seems.
+
+	if (num_nonzero_phases == 0) {
+		// Simplified version of the code below, assuming diff_flow == 0.0f everywhere.
+		diff_flow.x = omega * b.x * inv_A11;
+		diff_flow.y = omega * b.y * inv_A22;
 	} else {
 		// Subtract the missing terms from the right-hand side
 		// (it couldn't be done earlier, because we didn't know
@@ -82,14 +87,17 @@ void main()
 		b += smooth_r * textureOffset(diff_flow_tex, tc, ivec2( 1,  0)).xy;
 		b += smooth_d * textureOffset(diff_flow_tex, tc, ivec2( 0, -1)).xy;
 		b += smooth_u * textureOffset(diff_flow_tex, tc, ivec2( 0,  1)).xy;
-		diff_flow = texture(diff_flow_tex, tc).xy;
-	}
 
-	const float omega = 1.8;  // Marginally better than 1.6, it seems.
+		if (num_nonzero_phases == 1) {
+			diff_flow = vec2(0.0f);
+		} else {
+			diff_flow = texture(diff_flow_tex, tc).xy;
+		}
 
-	// From https://en.wikipedia.org/wiki/Successive_over-relaxation.
-	float sigma_u = A12 * diff_flow.y;
-	diff_flow.x += omega * ((b.x - sigma_u) * inv_A11 - diff_flow.x);
-	float sigma_v = A12 * diff_flow.x;
-	diff_flow.y += omega * ((b.y - sigma_v) * inv_A22 - diff_flow.y);
+		// From https://en.wikipedia.org/wiki/Successive_over-relaxation.
+		float sigma_u = A12 * diff_flow.y;
+		diff_flow.x += omega * ((b.x - sigma_u) * inv_A11 - diff_flow.x);
+		float sigma_v = A12 * diff_flow.x;
+		diff_flow.y += omega * ((b.y - sigma_v) * inv_A22 - diff_flow.y);
+	}
 }
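
For reference (not part of the commit itself): the else branch above is the standard over-relaxed Gauss-Seidel update for the per-pixel symmetric 2x2 system solved here, written out below as a LaTeX sketch. Symbol names mirror the shader (A11, A12, A22 and b come from the packed equation texture, omega is the 1.8 constant, and (du, dv) is diff_flow); the exact packing is defined in equations.frag.

% SOR update for the per-pixel system A x = b, where
%   A = [ A11  A12 ; A12  A22 ],  x = (du, dv) = diff_flow,
% with relaxation factor omega = 1.8 and 1/A11, 1/A22 stored as inv_A11, inv_A22.
\[
\begin{aligned}
du^{(k+1)} &= (1-\omega)\,du^{(k)} + \frac{\omega}{A_{11}}\bigl(b_x - A_{12}\,dv^{(k)}\bigr), \\
dv^{(k+1)} &= (1-\omega)\,dv^{(k)} + \frac{\omega}{A_{22}}\bigl(b_y - A_{12}\,du^{(k+1)}\bigr).
\end{aligned}
\]

The num_nonzero_phases == 0 fast path is the same update with du^{(k)} = dv^{(k)} = 0 substituted in, as the comment in the commit states.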