void composite_line_yuv_sse2_simple(uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight)
{
    const static unsigned char const1[] =
    {
        0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00
    };

    __asm__ volatile
    (
10 "pxor %%xmm0, %%xmm0 \n\t" /* clear zero register */
11 "movdqu (%4), %%xmm9 \n\t" /* load const1 */
12 "movd %0, %%xmm1 \n\t" /* load weight and decompose */
13 "movlhps %%xmm1, %%xmm1 \n\t"
14 "pshuflw $0, %%xmm1, %%xmm1 \n\t"
15 "pshufhw $0, %%xmm1, %%xmm1 \n\t"
        /*
            xmm1 (weight)

                    00 W 00 W 00 W 00 W 00 W 00 W 00 W 00 W
        */

        "loop_start:                            \n\t"
23 "movq (%1), %%xmm2 \n\t" /* load source alpha */
24 "punpcklbw %%xmm0, %%xmm2 \n\t" /* unpack alpha 8 8-bits alphas to 8 16-bits values */
        /*
            xmm2 (src alpha)

                    00 A8 00 A7 00 A6 00 A5 00 A4 00 A3 00 A2 00 A1
        */
32 "pmullw %%xmm1, %%xmm2 \n\t" /* premultiply source alpha */
33 "psrlw $8, %%xmm2 \n\t"
        /*
            xmm2 (src alpha, premultiplied by weight)

                    00 A8 00 A7 00 A6 00 A5 00 A4 00 A3 00 A2 00 A1
        */
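        /*
            In scalar terms the two instructions above compute, for each of
            the eight alphas, a = (a * W) >> 8, scaling the source alpha by
            the transition weight.
        */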
        /*
            DSTa = DSTa + ((SRCa * (0xFF - DSTa)) >> 8)
        */
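        /*
            Scalar sketch of the alpha update below (my illustration):

                for (i = 0; i < 8; i++)
                    dst_a[i] += (src_a[i] * (0xFF - dst_a[i])) >> 8;
        */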
45 "movq (%5), %%xmm3 \n\t" /* load dst alpha */
46 "punpcklbw %%xmm0, %%xmm3 \n\t" /* unpack dst 8 8-bits alphas to 8 16-bits values */
47 "movdqa %%xmm9, %%xmm4 \n\t"
48 "psubw %%xmm3, %%xmm4 \n\t"
49 "pmullw %%xmm2, %%xmm4 \n\t"
50 "psrlw $8, %%xmm4 \n\t"
51 "paddw %%xmm4, %%xmm3 \n\t"
52 "packuswb %%xmm0, %%xmm3 \n\t"
53 "movq %%xmm3, (%5) \n\t" /* save dst alpha */
55 "movdqu (%2), %%xmm3 \n\t" /* load src */
56 "movdqu (%3), %%xmm4 \n\t" /* load dst */
57 "movdqa %%xmm3, %%xmm5 \n\t" /* dub src */
58 "movdqa %%xmm4, %%xmm6 \n\t" /* dub dst */
        /*
            xmm3/xmm5 (src), xmm4/xmm6 (dst)

                    U8 V8 U7 V7 U6 V6 U5 V5 U4 V4 U3 V3 U2 V2 U1 V1
        */
69 "punpcklbw %%xmm0, %%xmm5 \n\t" /* unpack src low */
70 "punpcklbw %%xmm0, %%xmm6 \n\t" /* unpack dst low */
71 "punpckhbw %%xmm0, %%xmm3 \n\t" /* unpack src high */
72 "punpckhbw %%xmm0, %%xmm4 \n\t" /* unpack dst high */
        /*
            xmm5 (src low), xmm6 (dst low)

                    00 U4 00 V4 00 U3 00 V3 00 U2 00 V2 00 U1 00 V1

            xmm3 (src high), xmm4 (dst high)

                    00 U8 00 V8 00 U7 00 V7 00 U6 00 V6 00 U5 00 V5
        */
86 "movdqa %%xmm2, %%xmm7 \n\t" /* dub alpha */
87 "movdqa %%xmm2, %%xmm8 \n\t" /* dub alpha */
88 "movlhps %%xmm7, %%xmm7 \n\t" /* dub low */
89 "movhlps %%xmm8, %%xmm8 \n\t" /* dub high */
        /*
            xmm7 (low alphas in both halves)

                    00 A4 00 A3 00 A2 00 A1 00 A4 00 A3 00 A2 00 A1

            xmm8 (high alphas in both halves)

                    00 A8 00 A7 00 A6 00 A5 00 A8 00 A7 00 A6 00 A5
        */
100 "pshuflw $0x50, %%xmm7, %%xmm7 \n\t"
101 "pshuflw $0x50, %%xmm8, %%xmm8 \n\t"
102 "pshufhw $0xFA, %%xmm7, %%xmm7 \n\t"
103 "pshufhw $0xFA, %%xmm8, %%xmm8 \n\t"
        /*
            xmm7 (src alpha lower)

                    00 A4 00 A4 00 A3 00 A3 00 A2 00 A2 00 A1 00 A1

            xmm8 (src alpha upper)

                    00 A8 00 A8 00 A7 00 A7 00 A6 00 A6 00 A5 00 A5
        */
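        /*
            How the four shuffles above fan each alpha out to its U/V pair:
            pshuflw $0x50 (0b01010000) rewrites the low quadword as its
            words 0,0,1,1 and pshufhw $0xFA (0b11111010) rewrites the high
            quadword as its words 2,2,3,3, so every 16-bit alpha now appears
            twice, one copy per chroma byte.
        */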
        /*
            DST = SRC * ALPHA + DST * (0xFF - ALPHA)
                = SRC * ALPHA + DST * 0xFF - DST * ALPHA
                = (SRC - DST) * ALPHA + DST * 0xFF

            (followed by >> 8 to rescale the 16-bit result back to 8 bits)
        */
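        /*
            Why the rearranged form is safe in unsigned 16-bit lanes:
            SRC - DST may go negative, but pmullw keeps the correct low 16
            bits either way, and (SRC - DST) * ALPHA + DST * 0xFF always
            lands in [0, 0xFE01], so the logical shift right by 8 below
            still produces the correct byte.
        */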
121 "psubw %%xmm4, %%xmm3 \n\t" /* src = src - dst */
122 "psubw %%xmm6, %%xmm5 \n\t"
123 "pmullw %%xmm8, %%xmm3 \n\t" /* src = src * alpha */
124 "pmullw %%xmm7, %%xmm5 \n\t"
125 "pmullw %%xmm9, %%xmm4 \n\t" /* dst = dst * 0xFF */
126 "pmullw %%xmm9, %%xmm6 \n\t"
127 "paddw %%xmm3, %%xmm4 \n\t" /* dst = dst + src */
128 "paddw %%xmm5, %%xmm6 \n\t"
129 "psrlw $8, %%xmm4 \n\t" /* dst = dst >> 8 */
130 "psrlw $8, %%xmm6 \n\t"
131 // "pminsw %%xmm9, %%xmm4 \n\t" /* clamp values */
132 // "pminsw %%xmm9, %%xmm6 \n\t"
        /*
            xmm6 (dst low)

                    00 U4 00 V4 00 U3 00 V3 00 U2 00 V2 00 U1 00 V1

            xmm4 (dst high)

                    00 U8 00 V8 00 U7 00 V7 00 U6 00 V6 00 U5 00 V5
        */
143 "packuswb %%xmm4, %%xmm6 \n\t"
        /*
            xmm6 (dst)

                    U8 V8 U7 V7 U6 V6 U5 V5 U4 V4 U3 V3 U2 V2 U1 V1
        */
150 "movdqu %%xmm6, (%3) \n\t" /* store dst */
161 "jnz loop_start \n\t"
164 : "r" (weight >> 8), "r" (alpha_b), "r" (src), "r" (dest), "r" (const1) , "r" (alpha_a), "r" (width / 8)
165 //: "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9", "memory"