#version 440
// NOTE(review): GL_ARB_shader_clock is enabled but clockARB()/clock2x32ARB()
// is never called anywhere in this file -- presumably left over from
// profiling; confirm before removing.
#extension GL_ARB_shader_clock : enable

// Do sixteen 8x8 blocks in a local group, because that matches up perfectly
// with needing 1024 coefficients for our four histograms (of 256 bins each).
#define NUM_Z 16

// One invocation per column (x) of each 8x8 block; NUM_Z blocks stacked in z.
layout(local_size_x = 8, local_size_z = NUM_Z) in;

// Output coefficient planes. Pairs of coefficients (rows k and 7-k of each
// transposed block) are packed into one 16-bit texel via pack_9_7(); the two
// middle coefficients (AC3, AC4) get their own signed 8-bit planes.
layout(r16ui) uniform restrict writeonly uimage2D dc_ac7_tex;
layout(r16ui) uniform restrict writeonly uimage2D ac1_ac6_tex;
layout(r16ui) uniform restrict writeonly uimage2D ac2_ac5_tex;
layout(r8i) uniform restrict writeonly iimage2D ac3_tex;
layout(r8i) uniform restrict writeonly iimage2D ac4_tex;

// Single-channel 8-bit input image.
layout(r8ui) uniform restrict readonly uimage2D image_tex;

// Shared scratch, reused twice: first as a transpose buffer for the 2D DCT
// (64 floats per block, stored as bit patterns), then as four local 256-bin
// histograms (4 * 256 = 64 * NUM_Z = 1024 uints).
shared uint temp[64 * NUM_Z];

// Global histograms: four 256-bin distributions of absolute coefficient
// values, selected per coefficient position by luma_mapping below.
layout(std430, binding = 9) buffer layoutName
{
	uint dist[4 * 256];
};

// Packs eight 2-bit histogram selectors (one per column) into a single uint.
#define MAPPING(s0, s1, s2, s3, s4, s5, s6, s7) ((s0) | (s1 << 2) | (s2 << 4) | (s3 << 6) | (s4 << 8) | (s5 << 10) | (s6 << 12) | (s7 << 14))

// Which of the four histograms each of the 64 coefficient positions feeds;
// one entry per row, eight 2-bit column selectors per entry. Roughly:
// lower frequencies -> lower-numbered histograms.
const uint luma_mapping[8] = {
	MAPPING(0, 0, 1, 1, 2, 2, 3, 3),
	MAPPING(0, 0, 1, 2, 2, 2, 3, 3),
	MAPPING(1, 1, 2, 2, 2, 3, 3, 3),
	MAPPING(1, 1, 2, 2, 2, 3, 3, 3),
	MAPPING(1, 2, 2, 2, 2, 3, 3, 3),
	MAPPING(2, 2, 2, 2, 3, 3, 3, 3),
	MAPPING(2, 2, 3, 3, 3, 3, 3, 3),
	MAPPING(3, 3, 3, 3, 3, 3, 3, 3),
};

// Scale factors; 1.0 / (sqrt(2.0) * cos(k * M_PI / 16.0)), except for the first which is 1.
const float sf[8] = {
	1.0, 0.7209598220069479, 0.765366864730180, 0.8504300947672564,
	1.0, 1.2727585805728336, 1.847759065022573, 3.6245097854115502
};

// Base quantization weights, raster order (row-major, y*8 + x).
const float W[64] = {
	 8, 16, 19, 22, 26, 27, 29, 34,
	16, 16, 22, 24, 27, 29, 34, 37,
	19, 22, 26, 27, 29, 34, 34, 38,
	22, 22, 26, 27, 29, 34, 37, 40,
	22, 26, 27, 29, 32, 35, 40, 48,
	26, 27, 29, 32, 35, 40, 48, 58,
	26, 27, 29, 34, 38, 46, 56, 69,
	27, 29, 35, 38, 46, 56, 69, 83
};
const float S = 4.0 * 0.5;  // whatever?

// NOTE: Contains factors to counteract the scaling in the DCT implementation.
// Reciprocal quantization entry for position (x, y): the AA&N DCT below
// leaves coefficient k scaled by sqrt(2) cos(k pi / 16) per dimension, so
// sf[x] * sf[y] (their reciprocals) folds that back out along with the
// weight W and the global scale S. Multiplying by QM thus quantizes in one go.
#define QM(x, y) (sf[x] * sf[y] / (W[y*8 + x] * S))

// Raster-order quantization matrix. The DC term is pinned to 1/64 (plain
// 2D-DCT normalization) instead of QM(0, 0).
const float quant_matrix[64] = {
	1.0 / 64.0, QM(1, 0), QM(2, 0), QM(3, 0), QM(4, 0), QM(5, 0), QM(6, 0), QM(7, 0),
	QM(0, 1), QM(1, 1), QM(2, 1), QM(3, 1), QM(4, 1), QM(5, 1), QM(6, 1), QM(7, 1),
	QM(0, 2), QM(1, 2), QM(2, 2), QM(3, 2), QM(4, 2), QM(5, 2), QM(6, 2), QM(7, 2),
	QM(0, 3), QM(1, 3), QM(2, 3), QM(3, 3), QM(4, 3), QM(5, 3), QM(6, 3), QM(7, 3),
	QM(0, 4), QM(1, 4), QM(2, 4), QM(3, 4), QM(4, 4), QM(5, 4), QM(6, 4), QM(7, 4),
	QM(0, 5), QM(1, 5), QM(2, 5), QM(3, 5), QM(4, 5), QM(5, 5), QM(6, 5), QM(7, 5),
	QM(0, 6), QM(1, 6), QM(2, 6), QM(3, 6), QM(4, 6), QM(5, 6), QM(6, 6), QM(7, 6),
	QM(0, 7), QM(1, 7), QM(2, 7), QM(3, 7), QM(4, 7), QM(5, 7), QM(6, 7), QM(7, 7)
};

// Clamp and pack a 9-bit and a 7-bit signed value into a 16-bit word.
// v9 goes into bits 0..8, v7 into bits 9..15 (both two's complement).
uint pack_9_7(int v9, int v7)
{
	return (uint(clamp(v9, -256, 255)) & 0x1ffu) |
		((uint(clamp(v7, -64, 63)) & 0x7fu) << 9);
}

// Scaled 1D DCT (AA&N). y0 is correctly scaled, all other y_k are scaled by
// sqrt(2) cos(k * Pi / 16) -- the scaling is divided back out via
// quant_matrix above. Standard Arai/Agui/Nakajima butterfly network
// (cf. jfdctflt.c in libjpeg); 5 multiplies, operating in place.
void dct_1d(inout float y0, inout float y1, inout float y2, inout float y3, inout float y4, inout float y5, inout float y6, inout float y7)
{
	const float a1 = 0.7071067811865474;  // 1/sqrt(2)
	const float a2 = 0.5411961001461971;  // cos(3/8 pi) * sqrt(2)
	const float a4 = 1.3065629648763766;  // cos(pi/8) * sqrt(2)
	// static const float a5 = 0.5 * (a4 - a2);
	const float a5 = 0.3826834323650897;

	// phase 1: initial butterflies (sums feed the even half, diffs the odd).
	const float p1_0 = y0 + y7;
	const float p1_1 = y1 + y6;
	const float p1_2 = y2 + y5;
	const float p1_3 = y3 + y4;
	const float p1_4 = y3 - y4;
	const float p1_5 = y2 - y5;
	const float p1_6 = y1 - y6;
	const float p1_7 = y0 - y7;

	// phase 2
	const float p2_0 = p1_0 + p1_3;
	const float p2_1 = p1_1 + p1_2;
	const float p2_2 = p1_1 - p1_2;
	const float p2_3 = p1_0 - p1_3;
	const float p2_4 = p1_4 + p1_5;  // Inverted.
	const float p2_5 = p1_5 + p1_6;
	const float p2_6 = p1_6 + p1_7;

	// phase 3
	const float p3_0 = p2_0 + p2_1;
	const float p3_1 = p2_0 - p2_1;
	const float p3_2 = p2_2 + p2_3;

	// phase 4: the rotations. (p2_4 - p2_6) * a5 is shared between p4_4
	// and p4_6 (the "z5" trick that saves a multiply).
	const float p4_2 = p3_2 * a1;
	const float p4_4 = p2_4 * a2 + (p2_4 - p2_6) * a5;
	const float p4_5 = p2_5 * a1;
	const float p4_6 = p2_6 * a4 + (p2_4 - p2_6) * a5;

	// phase 5
	const float p5_2 = p2_3 + p4_2;
	const float p5_3 = p2_3 - p4_2;
	const float p5_5 = p1_7 + p4_5;
	const float p5_7 = p1_7 - p4_5;

	// phase 6: output butterflies, written back in frequency order.
	y0 = p3_0;
	y4 = p3_1;
	y2 = p5_2;
	y6 = p5_3;
	y5 = p4_4 + p5_7;
	y1 = p5_5 + p4_6;
	y7 = p5_5 - p4_6;
	y3 = p5_7 - p4_4;
}

void main()
{
	// sx = global 8x8-block column; each z-slice of the workgroup handles
	// one block, each x-invocation one column (then one row) of that block.
	uint sx = gl_WorkGroupID.x * NUM_Z + gl_LocalInvocationID.z;
	uint x = 8 * sx;
	uint y = 8 * gl_WorkGroupID.y;
	uint n = gl_LocalInvocationID.x;
	uint z = gl_LocalInvocationID.z;

	// Load column.
	float y0 = imageLoad(image_tex, ivec2(x + n, y + 0)).x;
	float y1 = imageLoad(image_tex, ivec2(x + n, y + 1)).x;
	float y2 = imageLoad(image_tex, ivec2(x + n, y + 2)).x;
	float y3 = imageLoad(image_tex, ivec2(x + n, y + 3)).x;
	float y4 = imageLoad(image_tex, ivec2(x + n, y + 4)).x;
	float y5 = imageLoad(image_tex, ivec2(x + n, y + 5)).x;
	float y6 = imageLoad(image_tex, ivec2(x + n, y + 6)).x;
	float y7 = imageLoad(image_tex, ivec2(x + n, y + 7)).x;

	// Vertical DCT.
	dct_1d(y0, y1, y2, y3, y4, y5, y6, y7);

	// Communicate with the other shaders in the group.
	// Store our column into this block's 64-float slice of shared memory,
	// as raw bit patterns (temp is uint-typed for the histogram pass).
	uint base_idx = 64 * z;
	temp[base_idx + 0 * 8 + n] = floatBitsToUint(y0);
	temp[base_idx + 1 * 8 + n] = floatBitsToUint(y1);
	temp[base_idx + 2 * 8 + n] = floatBitsToUint(y2);
	temp[base_idx + 3 * 8 + n] = floatBitsToUint(y3);
	temp[base_idx + 4 * 8 + n] = floatBitsToUint(y4);
	temp[base_idx + 5 * 8 + n] = floatBitsToUint(y5);
	temp[base_idx + 6 * 8 + n] = floatBitsToUint(y6);
	temp[base_idx + 7 * 8 + n] = floatBitsToUint(y7);

	memoryBarrierShared();
	barrier();

	// Load row (so transpose, in a sense).
	y0 = uintBitsToFloat(temp[base_idx + n * 8 + 0]);
	y1 = uintBitsToFloat(temp[base_idx + n * 8 + 1]);
	y2 = uintBitsToFloat(temp[base_idx + n * 8 + 2]);
	y3 = uintBitsToFloat(temp[base_idx + n * 8 + 3]);
	y4 = uintBitsToFloat(temp[base_idx + n * 8 + 4]);
	y5 = uintBitsToFloat(temp[base_idx + n * 8 + 5]);
	y6 = uintBitsToFloat(temp[base_idx + n * 8 + 6]);
	y7 = uintBitsToFloat(temp[base_idx + n * 8 + 7]);

	// Horizontal DCT.
	dct_1d(y0, y1, y2, y3, y4, y5, y6, y7);

	// Quantize. quant_matrix is reciprocal, so a multiply is a divide;
	// it also undoes the AA&N scaling (see QM above).
	int c0 = int(round(y0 * quant_matrix[n * 8 + 0]));
	int c1 = int(round(y1 * quant_matrix[n * 8 + 1]));
	int c2 = int(round(y2 * quant_matrix[n * 8 + 2]));
	int c3 = int(round(y3 * quant_matrix[n * 8 + 3]));
	int c4 = int(round(y4 * quant_matrix[n * 8 + 4]));
	int c5 = int(round(y5 * quant_matrix[n * 8 + 5]));
	int c6 = int(round(y6 * quant_matrix[n * 8 + 6]));
	int c7 = int(round(y7 * quant_matrix[n * 8 + 7]));

	// Clamp, pack and store. Output textures are one block wide per texel
	// column (sx), eight rows tall per block (y + n).
	imageStore(dc_ac7_tex, ivec2(sx, y + n), uvec4(pack_9_7(c0, c7), 0, 0, 0));
	imageStore(ac1_ac6_tex, ivec2(sx, y + n), uvec4(pack_9_7(c1, c6), 0, 0, 0));
	imageStore(ac2_ac5_tex, ivec2(sx, y + n), uvec4(pack_9_7(c2, c5), 0, 0, 0));
	imageStore(ac3_tex, ivec2(sx, y + n), ivec4(c3, 0, 0, 0));
	imageStore(ac4_tex, ivec2(sx, y + n), ivec4(c4, 0, 0, 0));

	// Zero out the temporary area in preparation for counting up the histograms.
	// Note: each invocation zeroes exactly the eight entries it just read
	// above (64*z + 8*n + 0..7), so no extra barrier is needed before this.
	base_idx += 8 * n;
	temp[base_idx + 0] = 0;
	temp[base_idx + 1] = 0;
	temp[base_idx + 2] = 0;
	temp[base_idx + 3] = 0;
	temp[base_idx + 4] = 0;
	temp[base_idx + 5] = 0;
	temp[base_idx + 6] = 0;
	temp[base_idx + 7] = 0;

	memoryBarrierShared();
	barrier();

	// Count frequencies into four histograms. We do this to local memory first,
	// because this is _much_ faster; then we do global atomic adds for the nonzero
	// members.

	// First take the absolute value (signs are encoded differently) and clamp,
	// as any value over 255 is going to be encoded as an escape.
	c0 = min(abs(c0), 255);
	c1 = min(abs(c1), 255);
	c2 = min(abs(c2), 255);
	c3 = min(abs(c3), 255);
	c4 = min(abs(c4), 255);
	c5 = min(abs(c5), 255);
	c6 = min(abs(c6), 255);
	c7 = min(abs(c7), 255);

	// Add up in local memory. Each 2-bit field of m selects which of the
	// four 256-bin histograms (now overlaid on temp[]) the coefficient
	// at row n, column k belongs to.
	uint m = luma_mapping[n];
	atomicAdd(temp[bitfieldExtract(m, 0, 2) * 256 + c0], 1);
	atomicAdd(temp[bitfieldExtract(m, 2, 2) * 256 + c1], 1);
	atomicAdd(temp[bitfieldExtract(m, 4, 2) * 256 + c2], 1);
	atomicAdd(temp[bitfieldExtract(m, 6, 2) * 256 + c3], 1);
	atomicAdd(temp[bitfieldExtract(m, 8, 2) * 256 + c4], 1);
	atomicAdd(temp[bitfieldExtract(m, 10, 2) * 256 + c5], 1);
	atomicAdd(temp[bitfieldExtract(m, 12, 2) * 256 + c6], 1);
	atomicAdd(temp[bitfieldExtract(m, 14, 2) * 256 + c7], 1);

	memoryBarrierShared();
	barrier();

	// Add from local memory to global memory. The 128 invocations cover all
	// 1024 bins, eight consecutive bins each; skip zero bins to avoid
	// pointless global atomics.
	if (temp[base_idx + 0] != 0) atomicAdd(dist[base_idx + 0], temp[base_idx + 0]);
	if (temp[base_idx + 1] != 0) atomicAdd(dist[base_idx + 1], temp[base_idx + 1]);
	if (temp[base_idx + 2] != 0) atomicAdd(dist[base_idx + 2], temp[base_idx + 2]);
	if (temp[base_idx + 3] != 0) atomicAdd(dist[base_idx + 3], temp[base_idx + 3]);
	if (temp[base_idx + 4] != 0) atomicAdd(dist[base_idx + 4], temp[base_idx + 4]);
	if (temp[base_idx + 5] != 0) atomicAdd(dist[base_idx + 5], temp[base_idx + 5]);
	if (temp[base_idx + 6] != 0) atomicAdd(dist[base_idx + 6], temp[base_idx + 6]);
	if (temp[base_idx + 7] != 0) atomicAdd(dist[base_idx + 7], temp[base_idx + 7]);
}