/*
 * composite_line_yuv_sse2_simple.c
 * Copyright (C) 2003-2004 Ushodaya Enterprises Limited
 * Author: Maksym Veremeyenko <verem@m1stereo.tv>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <stdint.h>
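
/*
 * Composite one line of packed YUV (2 bytes per pixel): blend 'src' over
 * 'dest' using the per-pixel source alpha in 'alpha_b' scaled by 'weight',
 * and update the destination alpha in 'alpha_a'. The loop below processes
 * width / 8 blocks of 8 pixels (16 YUV bytes, 8 alpha bytes) per iteration
 * and uses xmm8-xmm10, so it requires x86-64.
 */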
void composite_line_yuv_sse2_simple(uint8_t *dest, uint8_t *src, int width, uint8_t *alpha_b, uint8_t *alpha_a, int weight)
{
    const static unsigned char const1[] =
    {
        0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00
    };

    const static unsigned char const2[] =
    {
        0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00
    };
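
    /*
        const1 holds eight 16-bit 0x00FF words, used both as the 0xFF
        multiplier and to form (0xFF - alpha); const2 holds eight 0x0080
        words, added for rounding before the final shift by 8.
    */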
35 "pxor %%xmm0, %%xmm0 \n\t" /* clear zero register */
36 "movdqu (%4), %%xmm9 \n\t" /* load const1 */
37 "movdqu (%7), %%xmm10 \n\t" /* load const2 */
38 "movd %0, %%xmm1 \n\t" /* load weight and decompose */
39 "movlhps %%xmm1, %%xmm1 \n\t"
40 "pshuflw $0, %%xmm1, %%xmm1 \n\t"
41 "pshufhw $0, %%xmm1, %%xmm1 \n\t"

        /*
            xmm1 (weight)
                00 W 00 W 00 W 00 W 00 W 00 W 00 W 00 W
        */
49 "movq (%1), %%xmm2 \n\t" /* load source alpha */
50 "punpcklbw %%xmm0, %%xmm2 \n\t" /* unpack alpha 8 8-bits alphas to 8 16-bits values */

        /*
            xmm2 (src alpha)
                00 A8 00 A7 00 A6 00 A5 00 A4 00 A3 00 A2 00 A1
        */
58 "pmullw %%xmm1, %%xmm2 \n\t" /* premultiply source alpha */
59 "psrlw $8, %%xmm2 \n\t"

        /*
            xmm2 (src alpha, premultiplied by weight)
                00 A8 00 A7 00 A6 00 A5 00 A4 00 A3 00 A2 00 A1
        */

        /*
            calculate the new destination alpha:

                DSTa = DSTa + SRCa * (0xFF - DSTa) / 0xFF

            the division by 0xFF is approximated below as (v + (v >> 8) + 0x80) >> 8
        */
71 "movq (%5), %%xmm3 \n\t" /* load dst alpha */
72 "punpcklbw %%xmm0, %%xmm3 \n\t" /* unpack dst 8 8-bits alphas to 8 16-bits values */
73 "movdqa %%xmm9, %%xmm4 \n\t"
74 "psubw %%xmm3, %%xmm4 \n\t"
75 "pmullw %%xmm2, %%xmm4 \n\t"
76 "movdqa %%xmm4, %%xmm5 \n\t"
77 "psrlw $8, %%xmm4 \n\t"
78 "paddw %%xmm5, %%xmm4 \n\t"
79 "paddw %%xmm10, %%xmm4 \n\t"
80 "psrlw $8, %%xmm4 \n\t"
81 "paddw %%xmm4, %%xmm3 \n\t"
82 "packuswb %%xmm0, %%xmm3 \n\t"
83 "movq %%xmm3, (%5) \n\t" /* save dst alpha */
85 "movdqu (%2), %%xmm3 \n\t" /* load src */
86 "movdqu (%3), %%xmm4 \n\t" /* load dst */
87 "movdqa %%xmm3, %%xmm5 \n\t" /* dub src */
88 "movdqa %%xmm4, %%xmm6 \n\t" /* dub dst */

        /*
            xmm3, xmm5 (src) and xmm4, xmm6 (dst), packed bytes:
                U8 V8 U7 V7 U6 V6 U5 V5 U4 V4 U3 V3 U2 V2 U1 V1
        */
99 "punpcklbw %%xmm0, %%xmm5 \n\t" /* unpack src low */
100 "punpcklbw %%xmm0, %%xmm6 \n\t" /* unpack dst low */
101 "punpckhbw %%xmm0, %%xmm3 \n\t" /* unpack src high */
102 "punpckhbw %%xmm0, %%xmm4 \n\t" /* unpack dst high */

        /*
            xmm5 (src low), xmm6 (dst low):
                00 U4 00 V4 00 U3 00 V3 00 U2 00 V2 00 U1 00 V1

            xmm3 (src high), xmm4 (dst high):
                00 U8 00 V8 00 U7 00 V7 00 U6 00 V6 00 U5 00 V5
        */
116 "movdqa %%xmm2, %%xmm7 \n\t" /* dub alpha */
117 "movdqa %%xmm2, %%xmm8 \n\t" /* dub alpha */
118 "movlhps %%xmm7, %%xmm7 \n\t" /* dub low */
119 "movhlps %%xmm8, %%xmm8 \n\t" /* dub high */

        /*
            xmm7 (low alphas in both quadwords):
                00 A4 00 A3 00 A2 00 A1 00 A4 00 A3 00 A2 00 A1

            xmm8 (high alphas in both quadwords):
                00 A8 00 A7 00 A6 00 A5 00 A8 00 A7 00 A6 00 A5
        */
130 "pshuflw $0x50, %%xmm7, %%xmm7 \n\t"
131 "pshuflw $0x50, %%xmm8, %%xmm8 \n\t"
132 "pshufhw $0xFA, %%xmm7, %%xmm7 \n\t"
133 "pshufhw $0xFA, %%xmm8, %%xmm8 \n\t"

        /*
            xmm7 (src alpha lower)
                00 A4 00 A4 00 A3 00 A3 00 A2 00 A2 00 A1 00 A1

            xmm8 (src alpha upper)
                00 A8 00 A8 00 A7 00 A7 00 A6 00 A6 00 A5 00 A5
        */

        /*
            blend src over dst:

                DST = SRC * ALPHA + DST * (0xFF - ALPHA)
                    = SRC * ALPHA + DST * 0xFF - DST * ALPHA
                    = (SRC - DST) * ALPHA + DST * 0xFF
        */
151 "psubw %%xmm4, %%xmm3 \n\t" /* src = src - dst */
152 "psubw %%xmm6, %%xmm5 \n\t"
153 "pmullw %%xmm8, %%xmm3 \n\t" /* src = src * alpha */
154 "pmullw %%xmm7, %%xmm5 \n\t"
155 "pmullw %%xmm9, %%xmm4 \n\t" /* dst = dst * 0xFF */
156 "pmullw %%xmm9, %%xmm6 \n\t"
157 "paddw %%xmm3, %%xmm4 \n\t" /* dst = dst + src */
158 "paddw %%xmm5, %%xmm6 \n\t"
159 "movdqa %%xmm4, %%xmm3 \n\t" /* dst = ((dst >> 8) + dst + 128) >> 8 */
160 "movdqa %%xmm6, %%xmm5 \n\t"
161 "psrlw $8, %%xmm4 \n\t"
162 "psrlw $8, %%xmm6 \n\t"
163 "paddw %%xmm3, %%xmm4 \n\t"
164 "paddw %%xmm5, %%xmm6 \n\t"
165 "paddw %%xmm10, %%xmm4 \n\t"
166 "paddw %%xmm10, %%xmm6 \n\t"
167 "psrlw $8, %%xmm4 \n\t"
168 "psrlw $8, %%xmm6 \n\t"
169 // "pminsw %%xmm9, %%xmm4 \n\t" /* clamp values */
170 // "pminsw %%xmm9, %%xmm6 \n\t"

        /*
            xmm6 (blended low part):
                00 U4 00 V4 00 U3 00 V3 00 U2 00 V2 00 U1 00 V1

            xmm4 (blended high part):
                00 U8 00 V8 00 U7 00 V7 00 U6 00 V6 00 U5 00 V5
        */
181 "packuswb %%xmm4, %%xmm6 \n\t"
186 U8 V8 U7 V7 U6 V6 U5 V5 U4 V4 U3 V3 U2 V2 U1 V1
188 "movdqu %%xmm6, (%3) \n\t" /* store dst */
199 "jnz loop_start \n\t"
202 : "r" (weight >> 8), "r" (alpha_b), "r" (src), "r" (dest), "r" (const1) , "r" (alpha_a), "r" (width / 8), "r" (const2)
203 //: "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7","xmm8","xmm9", "memory"
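
/*
 * For reference only: a plain C sketch of the per-pixel arithmetic that the
 * SSE2 loop above performs. It is not part of the original code; the names
 * div255_approx and composite_line_yuv_c_reference are invented for
 * illustration, and the sketch assumes the same data layout as the loop
 * (2 YUV bytes and 1 alpha byte per pixel).
 */
static inline int div255_approx(int v)
{
    /* same rounding the asm uses: (v + (v >> 8) + 0x80) >> 8 ~ v / 0xFF */
    return (v + (v >> 8) + 0x80) >> 8;
}

static void composite_line_yuv_c_reference(uint8_t *dest, uint8_t *src, int width,
    uint8_t *alpha_b, uint8_t *alpha_a, int weight)
{
    int i, w = weight >> 8;

    for (i = 0; i < width; i++)
    {
        /* premultiply source alpha by weight */
        int a = (alpha_b[i] * w) >> 8;

        /* DSTa = DSTa + SRCa * (0xFF - DSTa) / 0xFF */
        alpha_a[i] = alpha_a[i] + div255_approx(a * (0xFF - alpha_a[i]));

        /* DST = ((SRC - DST) * ALPHA + DST * 0xFF) / 0xFF for both bytes of the pixel */
        dest[2 * i + 0] = div255_approx((src[2 * i + 0] - dest[2 * i + 0]) * a + dest[2 * i + 0] * 0xFF);
        dest[2 * i + 1] = div255_approx((src[2 * i + 1] - dest[2 * i + 1]) * a + dest[2 * i + 1] * 0xFF);
    }
}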