void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
const int w2= (width+1)>>1;
- // SSE2 code runs faster with pointers aligned on a 32-byte boundary.
- IDWTELEM temp_buf[(width>>1) + 4];
- IDWTELEM * const temp = temp_buf + 4 - (((int)temp_buf & 0xF) >> 2);
+ DECLARE_ALIGNED_16(IDWTELEM, temp[width>>1]);
const int w_l= (width>>1);
const int w_r= w2 - 1;
int i;
i = 0;
asm volatile(
"pcmpeqd %%xmm7, %%xmm7 \n\t"
- "psllw $15, %%xmm7 \n\t"
- "psrlw $13, %%xmm7 \n\t"
+ "pcmpeqd %%xmm3, %%xmm3 \n\t"
+ "psllw $1, %%xmm3 \n\t"
+ "paddw %%xmm7, %%xmm3 \n\t"
+ "psllw $13, %%xmm3 \n\t"
::);
for(; i<w_l-15; i+=16){
asm volatile(
"movdqu 18(%1), %%xmm6 \n\t"
"paddw %%xmm1, %%xmm2 \n\t"
"paddw %%xmm5, %%xmm6 \n\t"
- "movdqa %%xmm2, %%xmm0 \n\t"
- "movdqa %%xmm6, %%xmm4 \n\t"
- "paddw %%xmm2, %%xmm2 \n\t"
- "paddw %%xmm6, %%xmm6 \n\t"
- "paddw %%xmm0, %%xmm2 \n\t"
- "paddw %%xmm4, %%xmm6 \n\t"
"paddw %%xmm7, %%xmm2 \n\t"
"paddw %%xmm7, %%xmm6 \n\t"
- "psraw $3, %%xmm2 \n\t"
- "psraw $3, %%xmm6 \n\t"
- "movdqa (%0), %%xmm0 \n\t"
- "movdqa 16(%0), %%xmm4 \n\t"
- "psubw %%xmm2, %%xmm0 \n\t"
- "psubw %%xmm6, %%xmm4 \n\t"
- "movdqa %%xmm0, (%0) \n\t"
- "movdqa %%xmm4, 16(%0) \n\t"
+ "pmulhw %%xmm3, %%xmm2 \n\t"
+ "pmulhw %%xmm3, %%xmm6 \n\t"
+ "paddw (%0), %%xmm2 \n\t"
+ "paddw 16(%0), %%xmm6 \n\t"
+ "movdqa %%xmm2, (%0) \n\t"
+ "movdqa %%xmm6, 16(%0) \n\t"
:: "r"(&b[i]), "r"(&ref[i])
: "memory"
);
i = 0;
asm volatile(
- "psllw $1, %%xmm7 \n\t"
+ "psllw $15, %%xmm7 \n\t"
+ "pcmpeqw %%xmm6, %%xmm6 \n\t"
+ "psrlw $13, %%xmm6 \n\t"
+ "paddw %%xmm7, %%xmm6 \n\t"
::);
for(; i<w_l-15; i+=16){
asm volatile(
- "movdqu (%1), %%xmm1 \n\t"
- "movdqu 16(%1), %%xmm5 \n\t"
- "movdqu 2(%1), %%xmm0 \n\t"
- "movdqu 18(%1), %%xmm4 \n\t" //FIXME try aligned reads and shifts
- "paddw %%xmm1, %%xmm0 \n\t"
- "paddw %%xmm5, %%xmm4 \n\t"
- "paddw %%xmm7, %%xmm0 \n\t"
- "paddw %%xmm7, %%xmm4 \n\t"
+ "movdqu (%1), %%xmm0 \n\t"
+ "movdqu 16(%1), %%xmm4 \n\t"
+ "movdqu 2(%1), %%xmm1 \n\t"
+ "movdqu 18(%1), %%xmm5 \n\t" //FIXME try aligned reads and shifts
+ "paddw %%xmm6, %%xmm0 \n\t"
+ "paddw %%xmm6, %%xmm4 \n\t"
+ "paddw %%xmm7, %%xmm1 \n\t"
+ "paddw %%xmm7, %%xmm5 \n\t"
+ "pavgw %%xmm1, %%xmm0 \n\t"
+ "pavgw %%xmm5, %%xmm4 \n\t"
+ "psubw %%xmm7, %%xmm0 \n\t"
+ "psubw %%xmm7, %%xmm4 \n\t"
+ "psraw $1, %%xmm0 \n\t"
+ "psraw $1, %%xmm4 \n\t"
"movdqa (%0), %%xmm1 \n\t"
"movdqa 16(%0), %%xmm5 \n\t"
- "psraw $2, %%xmm0 \n\t"
- "psraw $2, %%xmm4 \n\t"
"paddw %%xmm1, %%xmm0 \n\t"
"paddw %%xmm5, %%xmm4 \n\t"
"psraw $2, %%xmm0 \n\t"
b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
asm volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
- "psllw $15, %%mm7 \n\t"
- "psrlw $13, %%mm7 \n\t"
+ "pcmpeqw %%mm3, %%mm3 \n\t"
+ "psllw $1, %%mm3 \n\t"
+ "paddw %%mm7, %%mm3 \n\t"
+ "psllw $13, %%mm3 \n\t"
::);
for(; i<w_l-7; i+=8){
asm volatile(
"movq 8(%1), %%mm6 \n\t"
"paddw 2(%1), %%mm2 \n\t"
"paddw 10(%1), %%mm6 \n\t"
- "movq %%mm2, %%mm0 \n\t"
- "movq %%mm6, %%mm4 \n\t"
- "paddw %%mm2, %%mm2 \n\t"
- "paddw %%mm6, %%mm6 \n\t"
- "paddw %%mm0, %%mm2 \n\t"
- "paddw %%mm4, %%mm6 \n\t"
"paddw %%mm7, %%mm2 \n\t"
"paddw %%mm7, %%mm6 \n\t"
- "psraw $3, %%mm2 \n\t"
- "psraw $3, %%mm6 \n\t"
- "movq (%0), %%mm0 \n\t"
- "movq 8(%0), %%mm4 \n\t"
- "psubw %%mm2, %%mm0 \n\t"
- "psubw %%mm6, %%mm4 \n\t"
- "movq %%mm0, (%0) \n\t"
- "movq %%mm4, 8(%0) \n\t"
+ "pmulhw %%mm3, %%mm2 \n\t"
+ "pmulhw %%mm3, %%mm6 \n\t"
+ "paddw (%0), %%mm2 \n\t"
+ "paddw 8(%0), %%mm6 \n\t"
+ "movq %%mm2, (%0) \n\t"
+ "movq %%mm6, 8(%0) \n\t"
:: "r"(&b[i]), "r"(&ref[i])
: "memory"
);
i = 1;
b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS);
asm volatile(
- "psllw $1, %%mm7 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "pcmpeqw %%mm6, %%mm6 \n\t"
+ "psrlw $13, %%mm6 \n\t"
+ "paddw %%mm7, %%mm6 \n\t"
::);
for(; i<w_l-7; i+=8){
asm volatile(
"movq (%1), %%mm0 \n\t"
"movq 8(%1), %%mm4 \n\t"
- "paddw 2(%1), %%mm0 \n\t"
- "paddw 10(%1), %%mm4 \n\t"
- "paddw %%mm7, %%mm0 \n\t"
- "paddw %%mm7, %%mm4 \n\t"
- "psraw $2, %%mm0 \n\t"
- "psraw $2, %%mm4 \n\t"
+ "movq 2(%1), %%mm1 \n\t"
+ "movq 10(%1), %%mm5 \n\t"
+ "paddw %%mm6, %%mm0 \n\t"
+ "paddw %%mm6, %%mm4 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "paddw %%mm7, %%mm5 \n\t"
+ "pavgw %%mm1, %%mm0 \n\t"
+ "pavgw %%mm5, %%mm4 \n\t"
+ "psubw %%mm7, %%mm0 \n\t"
+ "psubw %%mm7, %%mm4 \n\t"
+ "psraw $1, %%mm0 \n\t"
+ "psraw $1, %%mm4 \n\t"
"movq (%0), %%mm1 \n\t"
"movq 8(%0), %%mm5 \n\t"
"paddw %%mm1, %%mm0 \n\t"
}
}
+#ifdef HAVE_7REGS
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
- ""op" (%%"r",%%"REG_d",2), %%"t0" \n\t"\
- ""op" 16(%%"r",%%"REG_d",2), %%"t1" \n\t"\
- ""op" 32(%%"r",%%"REG_d",2), %%"t2" \n\t"\
- ""op" 48(%%"r",%%"REG_d",2), %%"t3" \n\t"
+ ""op" ("r",%%"REG_d"), %%"t0" \n\t"\
+ ""op" 16("r",%%"REG_d"), %%"t1" \n\t"\
+ ""op" 32("r",%%"REG_d"), %%"t2" \n\t"\
+ ""op" 48("r",%%"REG_d"), %%"t3" \n\t"
/* Load 64 bytes (four XMM registers) from the array base r indexed by
 * REG_d; expands via the generic load_add macro with an aligned movdqa,
 * so the pointers are assumed 16-byte aligned. */
#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)
/* Same addressing as the load macro, but paddw-accumulates the four
 * 16-byte memory operands into the four given XMM registers instead of
 * overwriting them. */
#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3)
-#define snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
+#define snow_vertical_compose_r2r_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
"psubw %%"s0", %%"t0" \n\t"\
"psubw %%"s1", %%"t1" \n\t"\
"psubw %%"s2", %%"t2" \n\t"\
"psubw %%"s3", %%"t3" \n\t"
#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
- "movdqa %%"s0", (%%"w",%%"REG_d",2) \n\t"\
- "movdqa %%"s1", 16(%%"w",%%"REG_d",2) \n\t"\
- "movdqa %%"s2", 32(%%"w",%%"REG_d",2) \n\t"\
- "movdqa %%"s3", 48(%%"w",%%"REG_d",2) \n\t"
+ "movdqa %%"s0", ("w",%%"REG_d") \n\t"\
+ "movdqa %%"s1", 16("w",%%"REG_d") \n\t"\
+ "movdqa %%"s2", 32("w",%%"REG_d") \n\t"\
+ "movdqa %%"s3", 48("w",%%"REG_d") \n\t"
-#define snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)\
+#define snow_vertical_compose_sra(n,t0,t1,t2,t3)\
"psraw $"n", %%"t0" \n\t"\
"psraw $"n", %%"t1" \n\t"\
"psraw $"n", %%"t2" \n\t"\
"psraw $"n", %%"t3" \n\t"
-#define snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
+#define snow_vertical_compose_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
"paddw %%"s0", %%"t0" \n\t"\
"paddw %%"s1", %%"t1" \n\t"\
"paddw %%"s2", %%"t2" \n\t"\
"paddw %%"s3", %%"t3" \n\t"
+#define snow_vertical_compose_r2r_pmulhw(s0,s1,s2,s3,t0,t1,t2,t3)\
+ "pmulhw %%"s0", %%"t0" \n\t"\
+ "pmulhw %%"s1", %%"t1" \n\t"\
+ "pmulhw %%"s2", %%"t2" \n\t"\
+ "pmulhw %%"s3", %%"t3" \n\t"
+
#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
"movdqa %%"s0", %%"t0" \n\t"\
"movdqa %%"s1", %%"t1" \n\t"\
b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
}
+ i+=i;
asm volatile (
"jmp 2f \n\t"
"1: \n\t"
-
- "mov %6, %%"REG_a" \n\t"
- "mov %4, %%"REG_S" \n\t"
-
- snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
- snow_vertical_compose_sse2_r2r_add("xmm0","xmm2","xmm4","xmm6","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
-
- "pcmpeqd %%xmm1, %%xmm1 \n\t"
- "psllw $15, %%xmm1 \n\t"
- "psrlw $13, %%xmm1 \n\t"
- "mov %5, %%"REG_a" \n\t"
-
- snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_sra("3","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_load(REG_a,"xmm1","xmm3","xmm5","xmm7")
- snow_vertical_compose_sse2_sub("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
- snow_vertical_compose_sse2_store(REG_a,"xmm1","xmm3","xmm5","xmm7")
- "mov %3, %%"REG_c" \n\t"
- snow_vertical_compose_sse2_load(REG_S,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_add(REG_c,"xmm1","xmm3","xmm5","xmm7")
- snow_vertical_compose_sse2_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_store(REG_S,"xmm0","xmm2","xmm4","xmm6")
- "mov %2, %%"REG_a" \n\t"
- snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
-
- "pcmpeqd %%xmm1, %%xmm1 \n\t"
- "psllw $15, %%xmm1 \n\t"
- "psrlw $14, %%xmm1 \n\t"
- "mov %1, %%"REG_S" \n\t"
-
- snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_sra("2","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_store(REG_c,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_add(REG_S,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add("%6","xmm0","xmm2","xmm4","xmm6")
+
+
+ "pcmpeqw %%xmm0, %%xmm0 \n\t"
+ "pcmpeqw %%xmm2, %%xmm2 \n\t"
+ "paddw %%xmm2, %%xmm2 \n\t"
+ "paddw %%xmm0, %%xmm2 \n\t"
+ "psllw $13, %%xmm2 \n\t"
+ snow_vertical_compose_r2r_add("xmm0","xmm0","xmm0","xmm0","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_r2r_pmulhw("xmm2","xmm2","xmm2","xmm2","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_add("%5","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_store("%5","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add("%3","xmm1","xmm3","xmm5","xmm7")
+ snow_vertical_compose_r2r_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_store("%4","xmm0","xmm2","xmm4","xmm6")
+
+ "pcmpeqw %%xmm7, %%xmm7 \n\t"
+ "pcmpeqw %%xmm5, %%xmm5 \n\t"
+ "psllw $15, %%xmm7 \n\t"
+ "psrlw $13, %%xmm5 \n\t"
+ "paddw %%xmm7, %%xmm5 \n\t"
+ snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6")
+ "movq (%2,%%"REG_d"), %%xmm1 \n\t"
+ "movq 8(%2,%%"REG_d"), %%xmm3 \n\t"
+ "paddw %%xmm7, %%xmm1 \n\t"
+ "paddw %%xmm7, %%xmm3 \n\t"
+ "pavgw %%xmm1, %%xmm0 \n\t"
+ "pavgw %%xmm3, %%xmm2 \n\t"
+ "movq 16(%2,%%"REG_d"), %%xmm1 \n\t"
+ "movq 24(%2,%%"REG_d"), %%xmm3 \n\t"
+ "paddw %%xmm7, %%xmm1 \n\t"
+ "paddw %%xmm7, %%xmm3 \n\t"
+ "pavgw %%xmm1, %%xmm4 \n\t"
+ "pavgw %%xmm3, %%xmm6 \n\t"
+ snow_vertical_compose_r2r_sub("xmm7","xmm7","xmm7","xmm7","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
+
+ snow_vertical_compose_sra("2","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_store("%3","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add("%1","xmm0","xmm2","xmm4","xmm6")
snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
- snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
- snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_add("%2","xmm0","xmm2","xmm4","xmm6")
+ snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6")
"2: \n\t"
- "sub $32, %%"REG_d" \n\t"
+ "sub $64, %%"REG_d" \n\t"
"jge 1b \n\t"
:"+d"(i)
- :
- "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
- "%"REG_a"","%"REG_S"","%"REG_c"");
+ :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}
#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
- ""op" (%%"r",%%"REG_d",2), %%"t0" \n\t"\
- ""op" 8(%%"r",%%"REG_d",2), %%"t1" \n\t"\
- ""op" 16(%%"r",%%"REG_d",2), %%"t2" \n\t"\
- ""op" 24(%%"r",%%"REG_d",2), %%"t3" \n\t"
+ ""op" ("r",%%"REG_d"), %%"t0" \n\t"\
+ ""op" 8("r",%%"REG_d"), %%"t1" \n\t"\
+ ""op" 16("r",%%"REG_d"), %%"t2" \n\t"\
+ ""op" 24("r",%%"REG_d"), %%"t3" \n\t"
/* MMX counterpart of the SSE2 load: fills four 8-byte MMX registers
 * from r indexed by REG_d via movq (32 bytes per expansion). */
#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)
/* MMX counterpart of the SSE2 add: paddw-accumulates four 8-byte
 * memory operands at (r + REG_d) into the given MMX registers. */
#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3)
-#define snow_vertical_compose_mmx_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
- snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)
-
#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
- "movq %%"s0", (%%"w",%%"REG_d",2) \n\t"\
- "movq %%"s1", 8(%%"w",%%"REG_d",2) \n\t"\
- "movq %%"s2", 16(%%"w",%%"REG_d",2) \n\t"\
- "movq %%"s3", 24(%%"w",%%"REG_d",2) \n\t"
-
-#define snow_vertical_compose_mmx_sra(n,t0,t1,t2,t3)\
- snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)
-
-#define snow_vertical_compose_mmx_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
- snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)
+ "movq %%"s0", ("w",%%"REG_d") \n\t"\
+ "movq %%"s1", 8("w",%%"REG_d") \n\t"\
+ "movq %%"s2", 16("w",%%"REG_d") \n\t"\
+ "movq %%"s3", 24("w",%%"REG_d") \n\t"
#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
"movq %%"s0", %%"t0" \n\t"\
"movq %%"s2", %%"t2" \n\t"\
"movq %%"s3", %%"t3" \n\t"
+
void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){
long i = width;
while(i & 15)
b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
}
-
+ i+=i;
asm volatile(
"jmp 2f \n\t"
"1: \n\t"
- "mov %6, %%"REG_a" \n\t"
- "mov %4, %%"REG_S" \n\t"
-
- snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_load("%4","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_add("%6","mm1","mm3","mm5","mm7")
+ "pcmpeqw %%mm0, %%mm0 \n\t"
+ "pcmpeqw %%mm2, %%mm2 \n\t"
+ "paddw %%mm2, %%mm2 \n\t"
+ "paddw %%mm0, %%mm2 \n\t"
+ "psllw $13, %%mm2 \n\t"
+ snow_vertical_compose_r2r_add("mm0","mm0","mm0","mm0","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_r2r_pmulhw("mm2","mm2","mm2","mm2","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_add("%5","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_store("%5","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_mmx_load("%4","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add("%3","mm1","mm3","mm5","mm7")
+ snow_vertical_compose_r2r_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_store("%4","mm0","mm2","mm4","mm6")
+ "pcmpeqw %%mm7, %%mm7 \n\t"
+ "pcmpeqw %%mm5, %%mm5 \n\t"
+ "psllw $15, %%mm7 \n\t"
+ "psrlw $13, %%mm5 \n\t"
+ "paddw %%mm7, %%mm5 \n\t"
+ snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6")
+ "movq (%2,%%"REG_d"), %%mm1 \n\t"
+ "movq 8(%2,%%"REG_d"), %%mm3 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "paddw %%mm7, %%mm3 \n\t"
+ "pavgw %%mm1, %%mm0 \n\t"
+ "pavgw %%mm3, %%mm2 \n\t"
+ "movq 16(%2,%%"REG_d"), %%mm1 \n\t"
+ "movq 24(%2,%%"REG_d"), %%mm3 \n\t"
+ "paddw %%mm7, %%mm1 \n\t"
+ "paddw %%mm7, %%mm3 \n\t"
+ "pavgw %%mm1, %%mm4 \n\t"
+ "pavgw %%mm3, %%mm6 \n\t"
+ snow_vertical_compose_r2r_sub("mm7","mm7","mm7","mm7","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")
+
+ snow_vertical_compose_sra("2","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_store("%3","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add("%1","mm0","mm2","mm4","mm6")
snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
- snow_vertical_compose_mmx_r2r_add("mm0","mm2","mm4","mm6","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
-
- "pcmpeqw %%mm1, %%mm1 \n\t"
- "psllw $15, %%mm1 \n\t"
- "psrlw $13, %%mm1 \n\t"
- "mov %5, %%"REG_a" \n\t"
-
- snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_sra("3","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_load(REG_a,"mm1","mm3","mm5","mm7")
- snow_vertical_compose_mmx_sub("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
- snow_vertical_compose_mmx_store(REG_a,"mm1","mm3","mm5","mm7")
- "mov %3, %%"REG_c" \n\t"
- snow_vertical_compose_mmx_load(REG_S,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_add(REG_c,"mm1","mm3","mm5","mm7")
- snow_vertical_compose_mmx_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_store(REG_S,"mm0","mm2","mm4","mm6")
- "mov %2, %%"REG_a" \n\t"
- snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
-
- "pcmpeqw %%mm1, %%mm1 \n\t"
- "psllw $15, %%mm1 \n\t"
- "psrlw $14, %%mm1 \n\t"
- "mov %1, %%"REG_S" \n\t"
-
- snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_sra("2","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_store(REG_c,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_add(REG_S,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
- snow_vertical_compose_mmx_sra("1","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
- snow_vertical_compose_mmx_store(REG_a,"mm0","mm2","mm4","mm6")
+ snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_add("%2","mm0","mm2","mm4","mm6")
+ snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6")
"2: \n\t"
- "sub $16, %%"REG_d" \n\t"
+ "sub $32, %%"REG_d" \n\t"
"jge 1b \n\t"
:"+d"(i)
- :
- "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
- "%"REG_a"","%"REG_S"","%"REG_c"");
+ :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5));
}
+#endif //HAVE_7REGS
#define snow_inner_add_yblock_sse2_header \
IDWTELEM * * dst_array = sb->line + src_y;\