-POWERPC_PERF_START_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F)
- {
- pixelsv2 = temp2;
- }
- else
- {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
- pixelssum3 = vec_add((vector unsigned short)pixelsv3,
- (vector unsigned short)pixelsv4);
- pixelssum3 = vec_add(pixelssum3, vcone);
- pixelssum1 = vec_add((vector unsigned short)pixelsv1,
- (vector unsigned short)pixelsv2);
- pixelssum1 = vec_add(pixelssum1, vcone);
-
- for (i = 0; i < h ; i++) {
- blockv = vec_ld(0, block);
-
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F)
- {
- pixelsv2 = temp2;
- }
- else
- {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
-
- pixelssum4 = vec_add((vector unsigned short)pixelsv3,
- (vector unsigned short)pixelsv4);
- pixelssum2 = vec_add((vector unsigned short)pixelsv1,
- (vector unsigned short)pixelsv2);
- temp4 = vec_add(pixelssum3, pixelssum4);
- temp4 = vec_sra(temp4, vctwo);
- temp3 = vec_add(pixelssum1, pixelssum2);
- temp3 = vec_sra(temp3, vctwo);
-
- pixelssum3 = vec_add(pixelssum4, vcone);
- pixelssum1 = vec_add(pixelssum2, vcone);
-
- blockv = vec_packsu(temp3, temp4);
-
- vec_st(blockv, 0, block);
-
- block += line_size;
- pixels += line_size;
- }
-
-POWERPC_PERF_STOP_COUNT(altivec_put_no_rnd_pixels16_xy2_num, 1);
-#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
+/*
+16x8 works with 16 elements; it avoids replicating loads and gives the
+compiler more room for scheduling. It is only used from inside
+hadamard8_diff16_altivec.
+
+Unfortunately, gcc-3.3 is a bit dumb here: the compiled code contains a LOT
+of spill code, since gcc (unlike xlc) cannot keep everything in registers by
+itself. The following code therefore includes hand-made register allocation.
+It is not clean, but on a 7450 the resulting code is much faster (best case
+falls from 700+ cycles to 550).
+
+xlc does not add spill code, but it does not know how to schedule for the
+7450, and its code is not much faster than gcc-3.3's on the 7450 (although
+it uses 25% fewer instructions...).
+
+On the 970, the hand-made RA is still a win (around 690 vs. around 780), but
+xlc gets to around 660 on the regular C code...
+*/
+
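+/* The register ... __asm__ ("vN") declarations below pin each variable to a
+   fixed AltiVec register: the 16 per-row results stay in v0-v15 while the
+   butterfly macro works in the remaining registers. This is the hand-made
+   register allocation mentioned above. */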
+static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h) {
+ int sum;
+ register vector signed short
+ temp0 __asm__ ("v0"),
+ temp1 __asm__ ("v1"),
+ temp2 __asm__ ("v2"),
+ temp3 __asm__ ("v3"),
+ temp4 __asm__ ("v4"),
+ temp5 __asm__ ("v5"),
+ temp6 __asm__ ("v6"),
+ temp7 __asm__ ("v7");
+ register vector signed short
+ temp0S __asm__ ("v8"),
+ temp1S __asm__ ("v9"),
+ temp2S __asm__ ("v10"),
+ temp3S __asm__ ("v11"),
+ temp4S __asm__ ("v12"),
+ temp5S __asm__ ("v13"),
+ temp6S __asm__ ("v14"),
+ temp7S __asm__ ("v15");
+ register const vector unsigned char vzero __asm__ ("v31") =
+ (const vector unsigned char)vec_splat_u8(0);
+ {
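+ /* constants live in v16-v21; the macro's temporaries reuse v22-v30 */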
+ register const vector signed short vprod1 __asm__ ("v16") =
+ (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
+ register const vector signed short vprod2 __asm__ ("v17") =
+ (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
+ register const vector signed short vprod3 __asm__ ("v18") =
+ (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
+ register const vector unsigned char perm1 __asm__ ("v19") =
+ (const vector unsigned char)
+ {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
+ register const vector unsigned char perm2 __asm__ ("v20") =
+ (const vector unsigned char)
+ {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
+ register const vector unsigned char perm3 __asm__ ("v21") =
+ (const vector unsigned char)
+ {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
+
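+/* ONEITERBUTTERFLY loads row i of src and dst (unaligned), widens the bytes
+   to shorts, subtracts, and applies the three butterfly stages of the
+   8-point horizontal Hadamard transform. res1 receives the transformed
+   first 8 columns, res2 columns 8-15. */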
+#define ONEITERBUTTERFLY(i, res1, res2) \
+ { \
+ register vector unsigned char src1 __asm__ ("v22"), \
+ src2 __asm__ ("v23"), \
+ dst1 __asm__ ("v24"), \
+ dst2 __asm__ ("v25"), \
+ srcO __asm__ ("v22"), \
+ dstO __asm__ ("v23"); \
+ \
+ register vector signed short srcV __asm__ ("v24"), \
+ dstV __asm__ ("v25"), \
+ srcW __asm__ ("v26"), \
+ dstW __asm__ ("v27"), \
+ but0 __asm__ ("v28"), \
+ but0S __asm__ ("v29"), \
+ op1 __asm__ ("v30"), \
+ but1 __asm__ ("v22"), \
+ op1S __asm__ ("v23"), \
+ but1S __asm__ ("v24"), \
+ op2 __asm__ ("v25"), \
+ but2 __asm__ ("v26"), \
+ op2S __asm__ ("v27"), \
+ but2S __asm__ ("v28"), \
+ op3 __asm__ ("v29"), \
+ op3S __asm__ ("v30"); \
+ \
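+ /* unaligned load of one 16-pixel row from src and one from dst */ \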
+ src1 = vec_ld(stride * i, src); \
+ src2 = vec_ld((stride * i) + 16, src); \
+ srcO = vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
+ dst1 = vec_ld(stride * i, dst); \
+ dst2 = vec_ld((stride * i) + 16, dst); \
+ dstO = vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ /* promote the unsigned chars to signed shorts */ \
+ srcV = (vector signed short)vec_mergeh((vector signed char)vzero, \
+ (vector signed char)srcO); \
+ dstV = (vector signed short)vec_mergeh((vector signed char)vzero, \
+ (vector signed char)dstO); \
+ srcW = (vector signed short)vec_mergel((vector signed char)vzero, \
+ (vector signed char)srcO); \
+ dstW = (vector signed short)vec_mergel((vector signed char)vzero, \
+ (vector signed char)dstO); \
+ /* subtractions inside the first butterfly */ \
+ but0 = vec_sub(srcV, dstV); \
+ but0S = vec_sub(srcW, dstW); \
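+ /* three butterfly stages of the horizontal Hadamard transform */ \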
+ op1 = vec_perm(but0, but0, perm1); \
+ but1 = vec_mladd(but0, vprod1, op1); \
+ op1S = vec_perm(but0S, but0S, perm1); \
+ but1S = vec_mladd(but0S, vprod1, op1S); \
+ op2 = vec_perm(but1, but1, perm2); \
+ but2 = vec_mladd(but1, vprod2, op2); \
+ op2S = vec_perm(but1S, but1S, perm2); \
+ but2S = vec_mladd(but1S, vprod2, op2S); \
+ op3 = vec_perm(but2, but2, perm3); \
+ res1 = vec_mladd(but2, vprod3, op3); \
+ op3S = vec_perm(but2S, but2S, perm3); \
+ res2 = vec_mladd(but2S, vprod3, op3S); \
+ }
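+ /* rows 0-7: temp0..temp7 hold the horizontally transformed difference
+    rows of the left 8x8 half, temp0S..temp7S those of the right half */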
+ ONEITERBUTTERFLY(0, temp0, temp0S);
+ ONEITERBUTTERFLY(1, temp1, temp1S);
+ ONEITERBUTTERFLY(2, temp2, temp2S);
+ ONEITERBUTTERFLY(3, temp3, temp3S);
+ ONEITERBUTTERFLY(4, temp4, temp4S);
+ ONEITERBUTTERFLY(5, temp5, temp5S);
+ ONEITERBUTTERFLY(6, temp6, temp6S);
+ ONEITERBUTTERFLY(7, temp7, temp7S);
+ }
+#undef ONEITERBUTTERFLY
+ {
+ register vector signed int vsum;
+ register vector signed short line0S, line1S, line2S, line3S, line4S,
+ line5S, line6S, line7S, line0BS,line2BS,
+ line1BS,line3BS,line4BS,line6BS,line5BS,
+ line7BS,line0CS,line4CS,line1CS,line5CS,
+ line2CS,line6CS,line3CS,line7CS;
+
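+ /* vertical Hadamard transform of the left half: three butterfly stages
+    across the 8 transformed rows */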
+ register vector signed short line0 = vec_add(temp0, temp1);
+ register vector signed short line1 = vec_sub(temp0, temp1);
+ register vector signed short line2 = vec_add(temp2, temp3);
+ register vector signed short line3 = vec_sub(temp2, temp3);
+ register vector signed short line4 = vec_add(temp4, temp5);
+ register vector signed short line5 = vec_sub(temp4, temp5);
+ register vector signed short line6 = vec_add(temp6, temp7);
+ register vector signed short line7 = vec_sub(temp6, temp7);
+
+ register vector signed short line0B = vec_add(line0, line2);
+ register vector signed short line2B = vec_sub(line0, line2);
+ register vector signed short line1B = vec_add(line1, line3);
+ register vector signed short line3B = vec_sub(line1, line3);
+ register vector signed short line4B = vec_add(line4, line6);
+ register vector signed short line6B = vec_sub(line4, line6);
+ register vector signed short line5B = vec_add(line5, line7);
+ register vector signed short line7B = vec_sub(line5, line7);
+
+ register vector signed short line0C = vec_add(line0B, line4B);
+ register vector signed short line4C = vec_sub(line0B, line4B);
+ register vector signed short line1C = vec_add(line1B, line5B);
+ register vector signed short line5C = vec_sub(line1B, line5B);
+ register vector signed short line2C = vec_add(line2B, line6B);
+ register vector signed short line6C = vec_sub(line2B, line6B);
+ register vector signed short line3C = vec_add(line3B, line7B);
+ register vector signed short line7C = vec_sub(line3B, line7B);
+
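+ /* accumulate the absolute values of the coefficients of the left half */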
+ vsum = vec_sum4s(vec_abs(line0C), vec_splat_s32(0));
+ vsum = vec_sum4s(vec_abs(line1C), vsum);
+ vsum = vec_sum4s(vec_abs(line2C), vsum);
+ vsum = vec_sum4s(vec_abs(line3C), vsum);
+ vsum = vec_sum4s(vec_abs(line4C), vsum);
+ vsum = vec_sum4s(vec_abs(line5C), vsum);
+ vsum = vec_sum4s(vec_abs(line6C), vsum);
+ vsum = vec_sum4s(vec_abs(line7C), vsum);
+
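+ /* same vertical butterflies for the right 8x8 half (columns 8-15) */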
+ line0S = vec_add(temp0S, temp1S);
+ line1S = vec_sub(temp0S, temp1S);
+ line2S = vec_add(temp2S, temp3S);
+ line3S = vec_sub(temp2S, temp3S);
+ line4S = vec_add(temp4S, temp5S);
+ line5S = vec_sub(temp4S, temp5S);
+ line6S = vec_add(temp6S, temp7S);
+ line7S = vec_sub(temp6S, temp7S);
+
+ line0BS = vec_add(line0S, line2S);
+ line2BS = vec_sub(line0S, line2S);
+ line1BS = vec_add(line1S, line3S);
+ line3BS = vec_sub(line1S, line3S);
+ line4BS = vec_add(line4S, line6S);
+ line6BS = vec_sub(line4S, line6S);
+ line5BS = vec_add(line5S, line7S);
+ line7BS = vec_sub(line5S, line7S);
+
+ line0CS = vec_add(line0BS, line4BS);
+ line4CS = vec_sub(line0BS, line4BS);
+ line1CS = vec_add(line1BS, line5BS);
+ line5CS = vec_sub(line1BS, line5BS);
+ line2CS = vec_add(line2BS, line6BS);
+ line6CS = vec_sub(line2BS, line6BS);
+ line3CS = vec_add(line3BS, line7BS);
+ line7CS = vec_sub(line3BS, line7BS);
+
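+ /* add the right half's absolute coefficients to the running sum */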
+ vsum = vec_sum4s(vec_abs(line0CS), vsum);
+ vsum = vec_sum4s(vec_abs(line1CS), vsum);
+ vsum = vec_sum4s(vec_abs(line2CS), vsum);
+ vsum = vec_sum4s(vec_abs(line3CS), vsum);
+ vsum = vec_sum4s(vec_abs(line4CS), vsum);
+ vsum = vec_sum4s(vec_abs(line5CS), vsum);
+ vsum = vec_sum4s(vec_abs(line6CS), vsum);
+ vsum = vec_sum4s(vec_abs(line7CS), vsum);
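+ /* vec_sums leaves the grand total in element 3; splat it and store the
+    scalar into sum */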
+ vsum = vec_sums(vsum, (vector signed int)vzero);
+ vsum = vec_splat(vsum, 3);
+ vec_ste(vsum, 0, &sum);
+ }
+ return sum;
+}
+
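+/* Entry point: runs the 16x8 core on the top 8 rows and, when h == 16, on
+   the bottom 8 rows as well. */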
+int hadamard8_diff16_altivec(/*MpegEncContext*/ void *s, uint8_t *dst, uint8_t *src, int stride, int h){
+POWERPC_PERF_DECLARE(altivec_hadamard8_diff16_num, 1);
+ int score;
+POWERPC_PERF_START_COUNT(altivec_hadamard8_diff16_num, 1);
+ score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
+ if (h==16) {
+ dst += 8*stride;
+ src += 8*stride;
+ score += hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
+ }
+POWERPC_PERF_STOP_COUNT(altivec_hadamard8_diff16_num, 1);
+ return score;