*/
#include "h264pred_mips.h"
-#include "constants.h"
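/* 16x16 vertical prediction: copy the 16 pixels above the block into all 16 rows. */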
void ff_pred16x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "dsubu $2, %0, %1 \r\n"
- "daddu $3, %0, $0 \r\n"
- "ldl $4, 7($2) \r\n"
- "ldr $4, 0($2) \r\n"
- "ldl $5, 15($2) \r\n"
- "ldr $5, 8($2) \r\n"
- "dli $6, 0x10 \r\n"
+ "dli $8, 16 \r\n"
+ "gsldlc1 $f2, 7(%[srcA]) \r\n"
+ "gsldrc1 $f2, 0(%[srcA]) \r\n"
+ "gsldlc1 $f4, 15(%[srcA]) \r\n"
+ "gsldrc1 $f4, 8(%[srcA]) \r\n"
"1: \r\n"
- "sdl $4, 7($3) \r\n"
- "sdr $4, 0($3) \r\n"
- "sdl $5, 15($3) \r\n"
- "sdr $5, 8($3) \r\n"
- "daddu $3, %1 \r\n"
- "daddiu $6, -1 \r\n"
- "bnez $6, 1b \r\n"
- ::"r"(src),"r"(stride)
- : "$2","$3","$4","$5","$6","memory"
+ "gssdlc1 $f2, 7(%[src]) \r\n"
+ "gssdrc1 $f2, 0(%[src]) \r\n"
+ "gssdlc1 $f4, 15(%[src]) \r\n"
+ "gssdrc1 $f4, 8(%[src]) \r\n"
+ "daddu %[src], %[src], %[stride] \r\n"
+ "daddi $8, $8, -1 \r\n"
+ "bnez $8, 1b \r\n"
+ : [src]"+&r"(src)
+ : [stride]"r"(stride),[srcA]"r"(src-stride)
+ : "$8","$f2","$f4"
);
}
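/* 16x16 horizontal prediction: splat each left-neighbour pixel across its row (multiplying the byte by ff_pb_1 replicates it through the word). */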
void ff_pred16x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "daddiu $2, %0, -1 \r\n"
- "daddu $3, %0, $0 \r\n"
+ "daddiu $2, %[src], -1 \r\n"
+ "daddu $3, %[src], $0 \r\n"
"dli $6, 0x10 \r\n"
"1: \r\n"
"lbu $4, 0($2) \r\n"
- "dmul $5, $4, %2 \r\n"
+ "dmul $5, $4, %[ff_pb_1] \r\n"
"sdl $5, 7($3) \r\n"
"sdr $5, 0($3) \r\n"
"sdl $5, 15($3) \r\n"
"sdr $5, 8($3) \r\n"
- "daddu $2, %1 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $2, %[stride] \r\n"
+ "daddu $3, %[stride] \r\n"
"daddiu $6, -1 \r\n"
"bnez $6, 1b \r\n"
- ::"r"(src),"r"(stride),"r"(ff_pb_1)
- : "$2","$3","$4","$5","$6","memory"
+ ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
+ : "$2","$3","$4","$5","$6"
);
}
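/* 16x16 DC prediction: sum the 16 left and 16 top neighbours, take (sum + 16) >> 5 and splat it over the block. */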
void ff_pred16x16_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "daddiu $2, %0, -1 \r\n"
+ "daddiu $2, %[src], -1 \r\n"
"dli $6, 0x10 \r\n"
"xor $8, $8, $8 \r\n"
"1: \r\n"
"lbu $4, 0($2) \r\n"
"daddu $8, $8, $4 \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"daddiu $6, $6, -1 \r\n"
"bnez $6, 1b \r\n"
"dli $6, 0x10 \r\n"
- "negu $3, %1 \r\n"
- "daddu $2, %0, $3 \r\n"
+ "negu $3, %[stride] \r\n"
+ "daddu $2, %[src], $3 \r\n"
"2: \r\n"
"lbu $4, 0($2) \r\n"
"daddu $8, $8, $4 \r\n"
"bnez $6, 2b \r\n"
"daddiu $8, $8, 0x10 \r\n"
"dsra $8, 5 \r\n"
- "dmul $5, $8, %2 \r\n"
- "daddu $2, %0, $0 \r\n"
+ "dmul $5, $8, %[ff_pb_1] \r\n"
+ "daddu $2, %[src], $0 \r\n"
"dli $6, 0x10 \r\n"
"3: \r\n"
"sdl $5, 7($2) \r\n"
"sdr $5, 0($2) \r\n"
"sdl $5, 15($2) \r\n"
"sdr $5, 8($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"daddiu $6, $6, -1 \r\n"
"bnez $6, 3b \r\n"
- ::"r"(src),"r"(stride),"r"(ff_pb_1)
- : "$2","$3","$4","$5","$6","$8","memory"
+ ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
+ : "$2","$3","$4","$5","$6","$8"
);
}
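/* 8x8 luma (pred8x8l) top DC: average the 3-tap-filtered top row and fill the 8x8 block with it. */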
void ff_pred8x8l_top_dc_8_mmi(uint8_t *src, int has_topleft,
int has_topright, ptrdiff_t stride)
{
- int y;
uint32_t dc;
__asm__ volatile (
- "ldl $8, 7(%1) \r\n"
- "ldr $8, 0(%1) \r\n"
- "ldl $9, 7(%2) \r\n"
- "ldr $9, 0(%2) \r\n"
- "ldl $10, 7(%3) \r\n"
- "ldr $10, 0(%3) \r\n"
+ "ldl $8, 7(%[srcA]) \r\n"
+ "ldr $8, 0(%[srcA]) \r\n"
+ "ldl $9, 7(%[src0]) \r\n"
+ "ldr $9, 0(%[src0]) \r\n"
+ "ldl $10, 7(%[src1]) \r\n"
+ "ldr $10, 0(%[src1]) \r\n"
"dmtc1 $8, $f2 \r\n"
"dmtc1 $9, $f4 \r\n"
"dmtc1 $10, $f6 \r\n"
"punpckhbh $f14, $f4, $f0 \r\n"
"punpcklbh $f16, $f6, $f0 \r\n"
"punpckhbh $f18, $f6, $f0 \r\n"
- "bnez %4, 1f \r\n"
+ "bnez %[has_topleft], 1f \r\n"
"pinsrh_0 $f8, $f8, $f12 \r\n"
"1: \r\n"
- "bnez %5, 2f \r\n"
+ "bnez %[has_topright], 2f \r\n"
"pinsrh_3 $f18, $f18, $f14 \r\n"
"2: \r\n"
"daddiu $8, $0, 2 \r\n"
"mfc1 $9, $f2 \r\n"
"addiu $9, $9, 4 \r\n"
"dsrl $9, $9, 3 \r\n"
- "li $8, 0x01010101 \r\n"
- "mul %0, $9, $8 \r\n"
- : "=r"(dc)
- : "r"(src-stride-1),"r"(src-stride),"r"(src-stride+1),
- "r"(has_topleft),"r"(has_topright)
- : "$8","$9","$10"
+ "mul %[dc], $9, %[ff_pb_1] \r\n"
+ : [dc]"=r"(dc)
+ : [srcA]"r"(src-stride-1),[src0]"r"(src-stride),
+ [src1]"r"(src-stride+1),[has_topleft]"r"(has_topleft),
+ [has_topright]"r"(has_topright),[ff_pb_1]"r"(ff_pb_1)
+ : "$8","$9","$10","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
+ "$f18","$f20","$f22"
);
- for (y=0; y<8; y++) {
- AV_WN4PA(((uint32_t*)src)+0, dc);
- AV_WN4PA(((uint32_t*)src)+1, dc);
- src += stride;
- }
+ __asm__ volatile (
+ "dli $8, 8 \r\n"
+ "1: \r\n"
+ "punpcklwd $f2, %[dc], %[dc] \r\n"
+ "gssdlc1 $f2, 7(%[src]) \r\n"
+ "gssdrc1 $f2, 0(%[src]) \r\n"
+ "daddu %[src], %[src], %[stride] \r\n"
+ "daddi $8, $8, -1 \r\n"
+ "bnez $8, 1b \r\n"
+ : [src]"+&r"(src)
+ : [dc]"f"(dc),[stride]"r"(stride)
+ : "$8","$f2"
+ );
}
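/* 8x8 luma DC: combine the filtered left column (l0..l7, computed in C) with the filtered top row (summed in the first asm block) into one DC value. */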
void ff_pred8x8l_dc_8_mmi(uint8_t *src, int has_topleft,
int has_topright, ptrdiff_t stride)
{
- int y;
uint32_t dc, dc1, dc2;
const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride]) + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;
__asm__ volatile (
- "ldl $8, 7(%1) \r\n"
- "ldr $8, 0(%1) \r\n"
- "ldl $9, 7(%2) \r\n"
- "ldr $9, 0(%2) \r\n"
- "ldl $10, 7(%3) \r\n"
- "ldr $10, 0(%3) \r\n"
+ "ldl $8, 7(%[srcA]) \r\n"
+ "ldr $8, 0(%[srcA]) \r\n"
+ "ldl $9, 7(%[src0]) \r\n"
+ "ldr $9, 0(%[src0]) \r\n"
+ "ldl $10, 7(%[src1]) \r\n"
+ "ldr $10, 0(%[src1]) \r\n"
"dmtc1 $8, $f2 \r\n"
"dmtc1 $9, $f4 \r\n"
"dmtc1 $10, $f6 \r\n"
"pshufh $f30, $f18, $f20 \r\n"
"pinsrh_3 $f10, $f10, $f30 \r\n"
"pinsrh_3 $f18, $f18, $f28 \r\n"
- "bnez %4, 1f \r\n"
+ "bnez %[has_topleft], 1f \r\n"
"pinsrh_0 $f8, $f8, $f12 \r\n"
"1: \r\n"
- "bnez %5, 2f \r\n"
+ "bnez %[has_topright], 2f \r\n"
"pshufh $f30, $f14, $f20 \r\n"
"pinsrh_3 $f10, $f10, $f30 \r\n"
"2: \r\n"
"psrah $f10, $f10, $f20 \r\n"
"packushb $f4, $f8, $f10 \r\n"
"biadd $f2, $f4 \r\n"
- "mfc1 %0, $f2 \r\n"
- : "=r"(dc2)
- : "r"(src-stride-1),"r"(src-stride),"r"(src-stride+1),
- "r"(has_topleft),"r"(has_topright)
- : "$8","$9","$10"
+ "mfc1 %[dc2], $f2 \r\n"
+ : [dc2]"=r"(dc2)
+ : [srcA]"r"(src-stride-1),[src0]"r"(src-stride),
+ [src1]"r"(src-stride+1),[has_topleft]"r"(has_topleft),
+ [has_topright]"r"(has_topright)
+ : "$8","$9","$10","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
+ "$f18","$f20","$f22"
);
dc1 = l0+l1+l2+l3+l4+l5+l6+l7;
- dc = PIXEL_SPLAT_X4((dc1+dc2+8)>>4);
+ dc = ((dc1+dc2+8)>>4)*0x01010101U;
- for (y=0; y<8; y++) {
- AV_WN4PA(((uint32_t*)src)+0, dc);
- AV_WN4PA(((uint32_t*)src)+1, dc);
- src += stride;
- }
-}
-
-void ff_pred8x8l_horizontal_8_mmi(uint8_t *src, int has_topleft,
- int has_topright, ptrdiff_t stride)
-{
- const int l0 = ((has_topleft ? src[-1+-1*stride] : src[-1+0*stride]) + 2*src[-1+0*stride] + src[-1+1*stride] + 2) >> 2;
- const int l1 = (src[-1+0*stride] + 2*src[-1+1*stride] + src[-1+2*stride] + 2) >> 2;
- const int l2 = (src[-1+1*stride] + 2*src[-1+2*stride] + src[-1+3*stride] + 2) >> 2;
- const int l3 = (src[-1+2*stride] + 2*src[-1+3*stride] + src[-1+4*stride] + 2) >> 2;
- const int l4 = (src[-1+3*stride] + 2*src[-1+4*stride] + src[-1+5*stride] + 2) >> 2;
- const int l5 = (src[-1+4*stride] + 2*src[-1+5*stride] + src[-1+6*stride] + 2) >> 2;
- const int l6 = (src[-1+5*stride] + 2*src[-1+6*stride] + src[-1+7*stride] + 2) >> 2;
- const int l7 = (src[-1+6*stride] + 2*src[-1+7*stride] + src[-1+7*stride] + 2) >> 2;
-
- AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4(l0));
- AV_WN4PA(src+0*stride+4, PIXEL_SPLAT_X4(l0));
- AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4(l1));
- AV_WN4PA(src+1*stride+4, PIXEL_SPLAT_X4(l1));
- AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4(l2));
- AV_WN4PA(src+2*stride+4, PIXEL_SPLAT_X4(l2));
- AV_WN4PA(src+3*stride, PIXEL_SPLAT_X4(l3));
- AV_WN4PA(src+3*stride+4, PIXEL_SPLAT_X4(l3));
- AV_WN4PA(src+4*stride, PIXEL_SPLAT_X4(l4));
- AV_WN4PA(src+4*stride+4, PIXEL_SPLAT_X4(l4));
- AV_WN4PA(src+5*stride, PIXEL_SPLAT_X4(l5));
- AV_WN4PA(src+5*stride+4, PIXEL_SPLAT_X4(l5));
- AV_WN4PA(src+6*stride, PIXEL_SPLAT_X4(l6));
- AV_WN4PA(src+6*stride+4, PIXEL_SPLAT_X4(l6));
- AV_WN4PA(src+7*stride, PIXEL_SPLAT_X4(l7));
- AV_WN4PA(src+7*stride+4, PIXEL_SPLAT_X4(l7));
+ __asm__ volatile (
+ "dli $8, 8 \r\n"
+ "1: \r\n"
+ "punpcklwd $f2, %[dc], %[dc] \r\n"
+ "gssdlc1 $f2, 7(%[src]) \r\n"
+ "gssdrc1 $f2, 0(%[src]) \r\n"
+ "daddu %[src], %[src], %[stride] \r\n"
+ "daddi $8, $8, -1 \r\n"
+ "bnez $8, 1b \r\n"
+ : [src]"+&r"(src)
+ : [dc]"f"(dc),[stride]"r"(stride)
+ : "$8","$f2"
+ );
}
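/* 8x8 luma vertical: 3-tap filter the row above, store it to row 0, then copy row 0 into the remaining 7 rows. */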
void ff_pred8x8l_vertical_8_mmi(uint8_t *src, int has_topleft,
int has_topright, ptrdiff_t stride)
{
- int y;
- uint32_t a, b;
-
__asm__ volatile (
- "ldl $8, 7(%1) \r\n"
- "ldr $8, 0(%1) \r\n"
- "ldl $9, 7(%2) \r\n"
- "ldr $9, 0(%2) \r\n"
- "ldl $10, 7(%3) \r\n"
- "ldr $10, 0(%3) \r\n"
+ "ldl $8, 7(%[srcA]) \r\n"
+ "ldr $8, 0(%[srcA]) \r\n"
+ "ldl $9, 7(%[src0]) \r\n"
+ "ldr $9, 0(%[src0]) \r\n"
+ "ldl $10, 7(%[src1]) \r\n"
+ "ldr $10, 0(%[src1]) \r\n"
"dmtc1 $8, $f2 \r\n"
"dmtc1 $9, $f4 \r\n"
"dmtc1 $10, $f6 \r\n"
"punpckhbh $f14, $f4, $f0 \r\n"
"punpcklbh $f16, $f6, $f0 \r\n"
"punpckhbh $f18, $f6, $f0 \r\n"
- "bnez %4, 1f \r\n"
+ "bnez %[has_topleft], 1f \r\n"
"pinsrh_0 $f8, $f8, $f12 \r\n"
"1: \r\n"
- "bnez %5, 2f \r\n"
+ "bnez %[has_topright], 2f \r\n"
"pinsrh_3 $f18, $f18, $f14 \r\n"
"2: \r\n"
"daddiu $8, $0, 2 \r\n"
"psrah $f8, $f8, $f20 \r\n"
"psrah $f10, $f10, $f20 \r\n"
"packushb $f4, $f8, $f10 \r\n"
- "sdc1 $f4, %0 \r\n"
- : "=m"(*src)
- : "r"(src-stride-1),"r"(src-stride),"r"(src-stride+1),
- "r"(has_topleft),"r"(has_topright)
- : "$8","$9","$10"
+ "sdc1 $f4, 0(%[src]) \r\n"
+ : [src]"=r"(src)
+ : [srcA]"r"(src-stride-1),[src0]"r"(src-stride),
+ [src1]"r"(src-stride+1),[has_topleft]"r"(has_topleft),
+ [has_topright]"r"(has_topright)
+ : "$8","$9","$10","$f2","$f4","$f6","$f8","$f10","$f12","$f14","$f16",
+ "$f18","$f20","$f22"
);
- a = AV_RN4PA(((uint32_t*)src)+0);
- b = AV_RN4PA(((uint32_t*)src)+1);
-
- for (y=1; y<8; y++) {
- AV_WN4PA(((uint32_t*)(src+y*stride))+0, a);
- AV_WN4PA(((uint32_t*)(src+y*stride))+1, b);
- }
+ __asm__ volatile (
+ "dli $8, 7 \r\n"
+ "gsldlc1 $f2, 7(%[src]) \r\n"
+ "gsldrc1 $f2, 0(%[src]) \r\n"
+ "dadd %[src], %[src], %[stride] \r\n"
+ "1: \r\n"
+ "gssdlc1 $f2, 7(%[src]) \r\n"
+ "gssdrc1 $f2, 0(%[src]) \r\n"
+ "daddu %[src], %[src], %[stride] \r\n"
+ "daddi $8, $8, -1 \r\n"
+ "bnez $8, 1b \r\n"
+ : [src]"+&r"(src)
+ : [stride]"r"(stride)
+ : "$8","$f2"
+ );
}
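/* 4x4 DC prediction: dc is the rounded mean of the 4 top and 4 left neighbours; topright is unused. */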
void ff_pred4x4_dc_8_mmi(uint8_t *src, const uint8_t *topright,
ptrdiff_t stride)
{
const int dc = (src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ src[-1+0*stride] + src[-1+1*stride]
+ src[-1+2*stride] + src[-1+3*stride] + 4) >> 3;
__asm__ volatile (
- "daddu $2, %2, $0 \r\n"
- "dmul $3, $2, %3 \r\n"
+ "daddu $2, %[dc], $0 \r\n"
+ "dmul $3, $2, %[ff_pb_1] \r\n"
"xor $4, $4, $4 \r\n"
- "gsswx $3, 0(%0,$4) \r\n"
- "daddu $4, %1 \r\n"
- "gsswx $3, 0(%0,$4) \r\n"
- "daddu $4, %1 \r\n"
- "gsswx $3, 0(%0,$4) \r\n"
- "daddu $4, %1 \r\n"
- "gsswx $3, 0(%0,$4) \r\n"
- ::"r"(src),"r"(stride),"r"(dc),"r"(ff_pb_1)
- : "$2","$3","$4","memory"
+ "gsswx $3, 0(%[src],$4) \r\n"
+ "daddu $4, %[stride] \r\n"
+ "gsswx $3, 0(%[src],$4) \r\n"
+ "daddu $4, %[stride] \r\n"
+ "gsswx $3, 0(%[src],$4) \r\n"
+ "daddu $4, %[stride] \r\n"
+ "gsswx $3, 0(%[src],$4) \r\n"
+ ::[src]"r"(src),[stride]"r"(stride),[dc]"r"(dc),[ff_pb_1]"r"(ff_pb_1)
+ : "$2","$3","$4"
);
}
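/* 8x8 vertical prediction: copy the 8 pixels above the block into all 8 rows. */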
void ff_pred8x8_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "dsubu $2, %0, %1 \r\n"
- "daddu $3, %0, $0 \r\n"
+ "dsubu $2, %[src], %[stride] \r\n"
+ "daddu $3, %[src], $0 \r\n"
"ldl $4, 7($2) \r\n"
"ldr $4, 0($2) \r\n"
"dli $5, 0x8 \r\n"
"1: \r\n"
"sdl $4, 7($3) \r\n"
"sdr $4, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"daddiu $5, -1 \r\n"
"bnez $5, 1b \r\n"
- ::"r"(src),"r"(stride)
- : "$2","$3","$4","$5","memory"
+ ::[src]"r"(src),[stride]"r"(stride)
+ : "$2","$3","$4","$5"
);
}
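/* 8x8 horizontal prediction: splat each left-neighbour pixel across its row. */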
void ff_pred8x8_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "daddiu $2, %0, -1 \r\n"
- "daddu $3, %0, $0 \r\n"
+ "daddiu $2, %[src], -1 \r\n"
+ "daddu $3, %[src], $0 \r\n"
"dli $6, 0x8 \r\n"
"1: \r\n"
"lbu $4, 0($2) \r\n"
- "dmul $5, $4, %2 \r\n"
+ "dmul $5, $4, %[ff_pb_1] \r\n"
"sdl $5, 7($3) \r\n"
"sdr $5, 0($3) \r\n"
- "daddu $2, %1 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $2, %[stride] \r\n"
+ "daddu $3, %[stride] \r\n"
"daddiu $6, -1 \r\n"
"bnez $6, 1b \r\n"
- ::"r"(src),"r"(stride),"r"(ff_pb_1)
- : "$2","$3","$4","$5","$6","memory"
+ ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
+ : "$2","$3","$4","$5","$6"
);
}
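/* 16x16 plane prediction: derive horizontal and vertical gradients from the top row and left column; svq3/rv40 select the codec-specific scaling. */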
const int svq3, const int rv40)
{
__asm__ volatile (
- "negu $2, %1 \r\n"
- "daddu $3, %0, $2 \r\n"
+ "negu $2, %[stride] \r\n"
+ "daddu $3, %[src], $2 \r\n"
"xor $f8, $f8, $f8 \r\n"
"gslwlc1 $f0, 2($3) \r\n"
"gslwrc1 $f0, -1($3) \r\n"
"punpcklbh $f2, $f2, $f8 \r\n"
"punpcklbh $f4, $f4, $f8 \r\n"
"punpcklbh $f6, $f6, $f8 \r\n"
- "dmtc1 %4, $f20 \r\n"
- "dmtc1 %5, $f22 \r\n"
- "dmtc1 %6, $f24 \r\n"
- "dmtc1 %7, $f26 \r\n"
+ "dmtc1 %[ff_pw_m8tom5], $f20 \r\n"
+ "dmtc1 %[ff_pw_m4tom1], $f22 \r\n"
+ "dmtc1 %[ff_pw_1to4], $f24 \r\n"
+ "dmtc1 %[ff_pw_5to8], $f26 \r\n"
"pmullh $f0, $f0, $f20 \r\n"
"pmullh $f2, $f2, $f22 \r\n"
"pmullh $f4, $f4, $f24 \r\n"
"dmtc1 $4, $f30 \r\n"
"pshufh $f2, $f0, $f30 \r\n"
"paddsh $f10, $f0, $f2 \r\n"
- "daddiu $3, %0, -1 \r\n"
+ "daddiu $3, %[src], -1 \r\n"
"daddu $3, $2 \r\n"
"lbu $4, 0($3) \r\n"
"lbu $8, 16($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $5, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $6, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $7, 0($3) \r\n"
"dsll $5, 16 \r\n"
"dsll $6, 32 \r\n"
"or $4, $5 \r\n"
"or $4, $6 \r\n"
"dmtc1 $4, $f0 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $4, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $5, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $6, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $7, 0($3) \r\n"
"dsll $5, 16 \r\n"
"dsll $6, 32 \r\n"
"or $4, $5 \r\n"
"or $4, $6 \r\n"
"dmtc1 $4, $f2 \r\n"
- "daddu $3, %1 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $4, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $5, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $6, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $7, 0($3) \r\n"
"dsll $5, 16 \r\n"
"dsll $6, 32 \r\n"
"or $4, $5 \r\n"
"or $4, $6 \r\n"
"dmtc1 $4, $f4 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $4, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $5, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $6, 0($3) \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"lbu $7, 0($3) \r\n"
"daddu $8, $7 \r\n"
"daddiu $8, 1 \r\n"
"dmfc1 $3, $f12 \r\n"
"dsll $3, 48 \r\n"
"dsra $3, 48 \r\n"
- "beqz %2, 1f \r\n"
+ "beqz %[svq3], 1f \r\n"
"dli $4, 4 \r\n"
"ddiv $2, $4 \r\n"
"ddiv $3, $4 \r\n"
"daddu $3, $4, $0 \r\n"
"b 2f \r\n"
"1: \r\n"
- "beqz %3, 1f \r\n"
+ "beqz %[rv40], 1f \r\n"
"dsra $4, $2, 2 \r\n"
"daddu $2, $4 \r\n"
"dsra $4, $3, 2 \r\n"
"pshufh $f12, $f12, $f8 \r\n"
"dli $4, 5 \r\n"
"dmtc1 $4, $f14 \r\n"
- "dmtc1 %8, $f2 \r\n"
- "pmullh $f2, $f2, $f0 \r\n"
- "dmtc1 %9, $f4 \r\n"
- "pmullh $f4, $f4, $f0 \r\n"
- "dmtc1 %10, $f6 \r\n"
- "pmullh $f6, $f6, $f0 \r\n"
- "dmtc1 %11, $f8 \r\n"
- "pmullh $f8, $f8, $f0 \r\n"
- "daddu $3, %0, $0 \r\n"
+ "pmullh $f2, %[ff_pw_0to3], $f0 \r\n"
+ "pmullh $f4, %[ff_pw_4to7], $f0 \r\n"
+ "pmullh $f6, %[ff_pw_8tob], $f0 \r\n"
+ "pmullh $f8, %[ff_pw_ctof], $f0 \r\n"
+ "daddu $3, %[src], $0 \r\n"
"dli $2, 16 \r\n"
"1: \r\n"
"paddsh $f16, $f2, $f12 \r\n"
"gssdlc1 $f20, 15($3) \r\n"
"gssdrc1 $f20, 8($3) \r\n"
"paddsh $f12, $f12, $f10 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $3, %[stride] \r\n"
"daddiu $2, -1 \r\n"
"bnez $2, 1b \r\n"
- ::"r"(src),"r"(stride),"r"(svq3),"r"(rv40),
- "r"(ff_pw_m8tom5),"r"(ff_pw_m4tom1),"r"(ff_pw_1to4),"r"(ff_pw_5to8),
- "r"(ff_pw_0to3),"r"(ff_pw_4to7),"r"(ff_pw_8tob),"r"(ff_pw_ctof)
- : "$2","$3","$4","$5","$6","$7","$8","memory"
+ ::[src]"r"(src),[stride]"r"(stride),[svq3]"r"(svq3),[rv40]"r"(rv40),
+ [ff_pw_m8tom5]"r"(ff_pw_m8tom5),[ff_pw_m4tom1]"r"(ff_pw_m4tom1),
+ [ff_pw_1to4]"r"(ff_pw_1to4),[ff_pw_5to8]"r"(ff_pw_5to8),
+ [ff_pw_0to3]"f"(ff_pw_0to3),[ff_pw_4to7]"f"(ff_pw_4to7),
+ [ff_pw_8tob]"f"(ff_pw_8tob),[ff_pw_ctof]"f"(ff_pw_ctof)
+ : "$2","$3","$4","$5","$6","$7","$8","$f0","$f2","$f4","$f6","$f8",
+ "$f10","$f12","$f14","$f16","$f18","$f20","$f22","$f24","$f26",
+ "$f28","$f30"
);
}
"xor $f0, $f0, $f0 \r\n"
"xor $f2, $f2, $f2 \r\n"
"xor $f30, $f30, $f30 \r\n"
- "negu $3, %1 \r\n"
- "daddu $3, $3, %0 \r\n"
+ "negu $3, %[stride] \r\n"
+ "daddu $3, $3, %[src] \r\n"
"gsldlc1 $f4, 7($3) \r\n"
"gsldrc1 $f4, 0($3) \r\n"
"punpcklbh $f0, $f4, $f30 \r\n"
"packushb $f4, $f0, $f2 \r\n"
"dli $2, 8 \r\n"
"1: \r\n"
- "gssdlc1 $f4, 7(%0) \r\n"
- "gssdrc1 $f4, 0(%0) \r\n"
- "daddu %0, %0, %1 \r\n"
+ "gssdlc1 $f4, 7(%[src]) \r\n"
+ "gssdrc1 $f4, 0(%[src]) \r\n"
+ "daddu %[src], %0, %[stride] \r\n"
"daddiu $2, $2, -1 \r\n"
"bnez $2, 1b \r\n"
- ::"r"(src),"r"(stride)
- : "$2","$3","memory"
+ ::[src]"r"(src),[stride]"r"(stride)
+ : "$2","$3","$f0","$f2","$f4","$f30"
);
}
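/* 8x8 chroma DC: compute a separate DC per 4x4 quadrant from the corresponding top and left neighbour sums. */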
void ff_pred8x8_dc_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "negu $2, %1 \r\n"
- "daddu $2, $2, %0 \r\n"
+ "negu $2, %[stride] \r\n"
+ "daddu $2, $2, %[src] \r\n"
"daddiu $5, $2, 4 \r\n"
"lbu $6, 0($2) \r\n"
"daddu $3, $0, $6 \r\n"
"daddu $4, $4, $6 \r\n"
"daddiu $5, 1 \r\n"
"dli $6, -1 \r\n"
- "daddu $6, $6, %0 \r\n"
+ "daddu $6, $6, %[src] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $7, $0, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $7, $7, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $7, $7, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $7, $7, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $8, $0, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $8, $8, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $8, $8, $5 \r\n"
- "daddu $6, $6, %1 \r\n"
+ "daddu $6, $6, %[stride] \r\n"
"lbu $5, 0($6) \r\n"
"daddu $8, $8, $5 \r\n"
"daddu $3, $3, $7 \r\n"
"pshufh $f6, $f6, $f30 \r\n"
"packushb $f0, $f0, $f2 \r\n"
"packushb $f2, $f4, $f6 \r\n"
- "daddu $2, $0, %0 \r\n"
+ "daddu $2, $0, %[src] \r\n"
"sdc1 $f0, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f0, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f0, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f0, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f2, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f2, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f2, 0($2) \r\n"
- "daddu $2, $2, %1 \r\n"
+ "daddu $2, $2, %[stride] \r\n"
"sdc1 $f2, 0($2) \r\n"
- ::"r"(src),"r"(stride)
- :"$2","$3","$4","$5","$6","$7","$8","memory"
+ ::[src]"r"(src),[stride]"r"(stride)
+ : "$2","$3","$4","$5","$6","$7","$8","$f0","$f2","$f4","$f6","$f30"
);
}
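/* 8x16 vertical prediction: copy the 8 pixels above the block into all 16 rows. */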
void ff_pred8x16_vertical_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "dsubu $2, %0, %1 \r\n"
- "daddu $3, %0, $0 \r\n"
- "ldl $4, 7($2) \r\n"
- "ldr $4, 0($2) \r\n"
- "dli $5, 0x10 \r\n"
+ "gsldlc1 $f2, 7(%[srcA]) \r\n"
+ "gsldrc1 $f2, 0(%[srcA]) \r\n"
+ "dli $8, 16 \r\n"
"1: \r\n"
- "sdl $4, 7($3) \r\n"
- "sdr $4, 0($3) \r\n"
- "daddu $3, %1 \r\n"
- "daddiu $5, -1 \r\n"
- "bnez $5, 1b \r\n"
- ::"r"(src),"r"(stride)
- : "$2","$3","$4","$5","memory"
+ "gssdlc1 $f2, 7(%[src]) \r\n"
+ "gssdrc1 $f2, 0(%[src]) \r\n"
+ "daddu %[src], %[src], %[stride] \r\n"
+ "daddi $8, $8, -1 \r\n"
+ "bnez $8, 1b \r\n"
+ : [src]"+&r"(src)
+ : [stride]"r"(stride),[srcA]"r"(src-stride)
+ : "$8","$f2"
);
}
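/* 8x16 horizontal prediction: splat each left-neighbour pixel across its 8-pixel row. */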
void ff_pred8x16_horizontal_8_mmi(uint8_t *src, ptrdiff_t stride)
{
__asm__ volatile (
- "daddiu $2, %0, -1 \r\n"
- "daddu $3, %0, $0 \r\n"
+ "daddiu $2, %[src], -1 \r\n"
+ "daddu $3, %[src], $0 \r\n"
"dli $6, 0x10 \r\n"
"1: \r\n"
"lbu $4, 0($2) \r\n"
- "dmul $5, $4, %2 \r\n"
+ "dmul $5, $4, %[ff_pb_1] \r\n"
"sdl $5, 7($3) \r\n"
"sdr $5, 0($3) \r\n"
- "daddu $2, %1 \r\n"
- "daddu $3, %1 \r\n"
+ "daddu $2, %[stride] \r\n"
+ "daddu $3, %[stride] \r\n"
"daddiu $6, -1 \r\n"
"bnez $6, 1b \r\n"
- ::"r"(src),"r"(stride),"r"(ff_pb_1)
- : "$2","$3","$4","$5","$6","memory"
+ ::[src]"r"(src),[stride]"r"(stride),[ff_pb_1]"r"(ff_pb_1)
+ : "$2","$3","$4","$5","$6"
);
}