;******************************************************************************
;* x86-optimized input routines; does shuffling of packed
;* YUV formats into individual planes, and converts RGB
;* into YUV planes also.
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"
rgb_Yrnd:        times 4 dd 0x80100      ; (16 << 15) + (1 << 8): offset 16, plus 0.5 of rounding for the final >> 9
rgb_UVrnd:       times 4 dd 0x400100     ; (128 << 15) + (1 << 8): offset 128, plus 0.5 of rounding for the final >> 9
bgr_Ycoeff_12x4: times 2 dw BY, GY, 0, BY
bgr_Ycoeff_3x56: times 2 dw RY, 0, GY, RY
rgb_Ycoeff_12x4: times 2 dw RY, GY, 0, RY
rgb_Ycoeff_3x56: times 2 dw BY, 0, GY, BY
bgr_Ucoeff_12x4: times 2 dw BU, GU, 0, BU
bgr_Ucoeff_3x56: times 2 dw RU, 0, GU, RU
rgb_Ucoeff_12x4: times 2 dw RU, GU, 0, RU
rgb_Ucoeff_3x56: times 2 dw BU, 0, GU, BU
bgr_Vcoeff_12x4: times 2 dw BV, GV, 0, BV
bgr_Vcoeff_3x56: times 2 dw RV, 0, GV, RV
rgb_Vcoeff_12x4: times 2 dw RV, GV, 0, RV
rgb_Vcoeff_3x56: times 2 dw BV, 0, GV, BV
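
; The coefficient rows above are pre-interleaved to match pmaddwd, which
; multiplies adjacent word pairs and sums each pair into a dword. "12x4" and
; "3x56" name the components of a two-pixel group each row handles (1st, 2nd
; and 4th vs. 3rd, 5th and 6th); the zero in each row discards the component
; that the other row covers. E.g. for bgr/Y, words { B0, G0, R0, B1 } times
; { BY, GY, 0, BY } give { B0*BY + G0*GY, B1*BY }, and { R0, B1, G1, R1 }
; times { RY, 0, GY, RY } give { R0*RY, G1*GY + R1*RY }; adding the two dword
; vectors completes BY*B + GY*G + RY*R for pixels 0 and 1.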
shuf_rgb_12x4: db  0, 0x80,  1, 0x80,  2, 0x80,  3, 0x80, \
                   6, 0x80,  7, 0x80,  8, 0x80,  9, 0x80
shuf_rgb_3x56: db  2, 0x80,  3, 0x80,  4, 0x80,  5, 0x80, \
                   8, 0x80,  9, 0x80, 10, 0x80, 11, 0x80
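
; 0x80 in a pshufb mask has the high bit set, which makes pshufb write a zero
; into that destination byte; each selected byte therefore lands in its own
; zero-extended word, replacing the pxor+punpcklbw unpack of the non-SSSE3
; path. shuf_rgb_12x4 expands bytes 0-3 and 6-9, shuf_rgb_3x56 expands bytes
; 2-5 and 8-11, matching the coefficient interleave above.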
;-----------------------------------------------------------------------------
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
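
; Conceptually, each output sample is a 15-bit fixed-point dot product
; (RY, GY, BY, ... are the 15-bit coefficients used in the tables above),
; with the level offset and the 0.5 rounding bias pre-merged into
; rgb_Yrnd/rgb_UVrnd and the final shift leaving 16-bit samples scaled by 64:
;
;   Y = (BY*B + GY*G + RY*R + rgb_Yrnd)  >> 9
;   U = (BU*B + GU*G + RU*R + rgb_UVrnd) >> 9
;   V = (BV*B + GV*G + RV*R + rgb_UVrnd) >> 9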
; %1 = nr. of XMM registers
; %2 = rgb or bgr
%macro RGB24_TO_Y_FN 2-3
cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, u3
%if mmsize == 8
    mova           m5, [%2_Ycoeff_12x4]
    mova           m6, [%2_Ycoeff_3x56]
%define coeff1 m5
%define coeff2 m6
%elif ARCH_X86_64
    mova           m8, [%2_Ycoeff_12x4]
    mova           m9, [%2_Ycoeff_3x56]
%define coeff1 m8
%define coeff2 m9
%else ; x86-32 && mmsize == 16
%define coeff1 [%2_Ycoeff_12x4]
%define coeff2 [%2_Ycoeff_3x56]
%endif ; x86-32/64 && mmsize == 8/16
%if (ARCH_X86_64 || mmsize == 8) && %0 == 3
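; the rgb and bgr functions differ only in the coefficients loaded above, so
; when %3 is given, the bgr variant simply jumps into the .body of its rgb
; twin instead of duplicating the loop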
    jmp mangle(program_name %+ _ %+ %3 %+ 24ToY %+ SUFFIX).body
%else ; !((ARCH_X86_64 || mmsize == 8) && %0 == 3)
.body:
%if cpuflag(ssse3)
    mova           m7, [shuf_rgb_12x4]
%define shuf_rgb1 m7
%if ARCH_X86_64
    mova          m10, [shuf_rgb_3x56]
%define shuf_rgb2 m10
%else ; x86-32
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; x86-32/64
%endif ; cpuflag(ssse3)
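; on x86-32 there are not enough XMM registers to keep both shuffle masks
; resident, so shuf_rgb2 remains a memory operand there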
%if notcpuflag(ssse3)
    pxor           m7, m7                 ; zero register for the punpcklbw unpacks below
%endif ; !cpuflag(ssse3)
    mova           m4, [rgb_Yrnd]
%if cpuflag(ssse3)
    movu           m0, [srcq+0]           ; (byte) { Bx, Gx, Rx }[0-3]
    movu           m2, [srcq+12]          ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb         m1, m0, shuf_rgb2      ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb         m0, shuf_rgb1          ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    pshufb         m3, m2, shuf_rgb2      ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb         m2, shuf_rgb1          ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd           m0, [srcq+0]           ; (byte) { B0, G0, R0, B1 }
    movd           m1, [srcq+2]           ; (byte) { R0, B1, G1, R1 }
    movd           m2, [srcq+6]           ; (byte) { B2, G2, R2, B3 }
    movd           m3, [srcq+8]           ; (byte) { R2, B3, G3, R3 }
%if mmsize == 16 ; i.e. sse2
    punpckldq      m0, m2                 ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq      m1, m3                 ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd           m2, [srcq+12]          ; (byte) { B4, G4, R4, B5 }
    movd           m3, [srcq+14]          ; (byte) { R4, B5, G5, R5 }
    movd           m5, [srcq+18]          ; (byte) { B6, G6, R6, B7 }
    movd           m6, [srcq+20]          ; (byte) { R6, B7, G7, R7 }
    punpckldq      m2, m5                 ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq      m3, m6                 ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16
    punpcklbw      m0, m7                 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw      m1, m7                 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    punpcklbw      m2, m7                 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw      m3, m7                 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
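; advance the source by mmsize/2 pixels (8 for SSE2+, 4 for MMX) of 3 bytes each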
    add          srcq, 3 * mmsize / 2
    pmaddwd        m0, coeff1             ; (dword) { B0*BY + G0*GY, B1*BY, B2*BY + G2*GY, B3*BY }
    pmaddwd        m1, coeff2             ; (dword) { R0*RY, G1*GY + R1*RY, R2*RY, G3*GY + R3*RY }
    pmaddwd        m2, coeff1             ; (dword) { B4*BY + G4*GY, B5*BY, B6*BY + G6*GY, B7*BY }
    pmaddwd        m3, coeff2             ; (dword) { R4*RY, G5*GY + R5*RY, R6*RY, G7*GY + R7*RY }
    paddd          m0, m1                 ; (dword) { Bx*BY + Gx*GY + Rx*RY }[0-3]
    paddd          m2, m3                 ; (dword) { Bx*BY + Gx*GY + Rx*RY }[4-7]
    paddd          m0, m4                 ; += rgb_Yrnd, i.e. (dword) { Y[0-3] }
    paddd          m2, m4                 ; += rgb_Yrnd, i.e. (dword) { Y[4-7] }
    psrad          m0, 9
    psrad          m2, 9
    packssdw       m0, m2                 ; (word) { Y[0-7] }
%endif ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
%endmacro
; %1 = nr. of XMM registers
; %2 = rgb or bgr
%macro RGB24_TO_UV_FN 2-3
cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, u3
%if ARCH_X86_64
    mova           m8, [%2_Ucoeff_12x4]
    mova           m9, [%2_Ucoeff_3x56]
    mova          m10, [%2_Vcoeff_12x4]
    mova          m11, [%2_Vcoeff_3x56]
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
%define coeffU1 [%2_Ucoeff_12x4]
%define coeffU2 [%2_Ucoeff_3x56]
%define coeffV1 [%2_Vcoeff_12x4]
%define coeffV2 [%2_Vcoeff_3x56]
%endif ; x86-32/64
%if ARCH_X86_64 && %0 == 3
    jmp mangle(program_name %+ _ %+ %3 %+ 24ToUV %+ SUFFIX).body
%else ; !(ARCH_X86_64 && %0 == 3)
.body:
%if cpuflag(ssse3)
    mova           m7, [shuf_rgb_12x4]
%define shuf_rgb1 m7
%if ARCH_X86_64
    mova          m12, [shuf_rgb_3x56]
%define shuf_rgb2 m12
%else ; x86-32
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; x86-32/64
%endif ; cpuflag(ssse3)
%if notcpuflag(ssse3)
    pxor           m7, m7                 ; zero register for the punpcklbw unpacks below
%endif ; !cpuflag(ssse3)
    mova           m6, [rgb_UVrnd]
%if cpuflag(ssse3)
    movu           m0, [srcq+0]           ; (byte) { Bx, Gx, Rx }[0-3]
    movu           m4, [srcq+12]          ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb         m1, m0, shuf_rgb2      ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb         m0, shuf_rgb1          ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
%else ; !cpuflag(ssse3)
    movd           m0, [srcq+0]           ; (byte) { B0, G0, R0, B1 }
    movd           m1, [srcq+2]           ; (byte) { R0, B1, G1, R1 }
    movd           m4, [srcq+6]           ; (byte) { B2, G2, R2, B3 }
    movd           m5, [srcq+8]           ; (byte) { R2, B3, G3, R3 }
%if mmsize == 16
    punpckldq      m0, m4                 ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq      m1, m5                 ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd           m4, [srcq+12]          ; (byte) { B4, G4, R4, B5 }
    movd           m5, [srcq+14]          ; (byte) { R4, B5, G5, R5 }
%endif ; mmsize == 16
    punpcklbw      m0, m7                 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw      m1, m7                 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
%endif ; cpuflag(ssse3)
    pmaddwd        m2, m0, coeffV1        ; (dword) { B0*BV + G0*GV, B1*BV, B2*BV + G2*GV, B3*BV }
    pmaddwd        m3, m1, coeffV2        ; (dword) { R0*RV, G1*GV + R1*RV, R2*RV, G3*GV + R3*RV }
    pmaddwd        m0, coeffU1            ; (dword) { B0*BU + G0*GU, B1*BU, B2*BU + G2*GU, B3*BU }
    pmaddwd        m1, coeffU2            ; (dword) { R0*RU, G1*GU + R1*RU, R2*RU, G3*GU + R3*RU }
    paddd          m0, m1                 ; (dword) { Bx*BU + Gx*GU + Rx*RU }[0-3]
    paddd          m2, m3                 ; (dword) { Bx*BV + Gx*GV + Rx*RV }[0-3]
%if cpuflag(ssse3)
    pshufb         m5, m4, shuf_rgb2      ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb         m4, shuf_rgb1          ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
%if mmsize == 16
    movd           m1, [srcq+18]          ; (byte) { B6, G6, R6, B7 }
    movd           m3, [srcq+20]          ; (byte) { R6, B7, G7, R7 }
    punpckldq      m4, m1                 ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq      m5, m3                 ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16 && !cpuflag(ssse3)
    punpcklbw      m4, m7                 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw      m5, m7                 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add          srcq, 3 * mmsize / 2
    pmaddwd        m1, m4, coeffU1        ; (dword) { B4*BU + G4*GU, B5*BU, B6*BU + G6*GU, B7*BU }
    pmaddwd        m3, m5, coeffU2        ; (dword) { R4*RU, G5*GU + R5*RU, R6*RU, G7*GU + R7*RU }
    pmaddwd        m4, coeffV1            ; (dword) { B4*BV + G4*GV, B5*BV, B6*BV + G6*GV, B7*BV }
    pmaddwd        m5, coeffV2            ; (dword) { R4*RV, G5*GV + R5*RV, R6*RV, G7*GV + R7*RV }
    paddd          m1, m3                 ; (dword) { Bx*BU + Gx*GU + Rx*RU }[4-7]
    paddd          m4, m5                 ; (dword) { Bx*BV + Gx*GV + Rx*RV }[4-7]
    paddd          m0, m6                 ; += rgb_UVrnd, i.e. (dword) { U[0-3] }
    paddd          m2, m6                 ; += rgb_UVrnd, i.e. (dword) { V[0-3] }
    paddd          m1, m6                 ; += rgb_UVrnd, i.e. (dword) { U[4-7] }
    paddd          m4, m6                 ; += rgb_UVrnd, i.e. (dword) { V[4-7] }
    psrad          m0, 9
    psrad          m1, 9
    psrad          m2, 9
    psrad          m4, 9
    packssdw       m0, m1                 ; (word) { U[0-7] }
    packssdw       m2, m4                 ; (word) { V[0-7] }
%endif ; mmsize == 8/16
%endif ; ARCH_X86_64 && %0 == 3
%endmacro
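
; Each INIT block below instantiates the functions for one instruction set;
; %1 is the XMM register count passed to cglobal (0 for the MMX versions),
; and the trailing "rgb" argument on the bgr variants names the function
; whose .body they jump into.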
INIT_MMX mmx
RGB24_TO_Y_FN 0, rgb
RGB24_TO_Y_FN 0, bgr, rgb
RGB24_TO_UV_FN 0, rgb
RGB24_TO_UV_FN 0, bgr, rgb

INIT_XMM sse2
RGB24_TO_Y_FN 10, rgb
RGB24_TO_Y_FN 10, bgr, rgb
RGB24_TO_UV_FN 12, rgb
RGB24_TO_UV_FN 12, bgr, rgb

INIT_XMM ssse3
RGB24_TO_Y_FN 11, rgb
RGB24_TO_Y_FN 11, bgr, rgb
RGB24_TO_UV_FN 13, rgb
RGB24_TO_UV_FN 13, bgr, rgb

INIT_XMM avx
RGB24_TO_Y_FN 11, rgb
RGB24_TO_Y_FN 11, bgr, rgb
RGB24_TO_UV_FN 13, rgb
RGB24_TO_UV_FN 13, bgr, rgb
;-----------------------------------------------------------------------------
; YUYV/UYVY/NV12/NV21 packed pixel shuffling.
;
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
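
; YUYV keeps luma in the even bytes and chroma in the odd bytes; UYVY is the
; reverse. A plane is thus extracted either with pand (keep the even bytes
; through a 0x00ff word mask) or with psrlw 8 (keep the odd bytes). NV12 and
; NV21 interleave the two chroma planes the same way, with no luma between.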
; %1 = a (aligned) or u (unaligned)
; %2 = yuyv or uyvy
%macro LOOP_YUYV_TO_Y 2
.loop_%1:
    mov%1          m0, [srcq+wq*2]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1          m1, [srcq+wq*2+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
%ifidn %2, yuyv
    pand           m0, m2                 ; (word) { Y0, Y1, ..., Y7 }
    pand           m1, m2                 ; (word) { Y8, Y9, ..., Y15 }
%else ; uyvy
    psrlw          m0, 8                  ; (word) { Y0, Y1, ..., Y7 }
    psrlw          m1, 8                  ; (word) { Y8, Y9, ..., Y15 }
%endif ; yuyv/uyvy
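; the masked/shifted words are already in the 0-255 range, so the unsigned
; saturation in packuswb below never clips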
    packuswb       m0, m1                 ; (byte) { Y0, ..., Y15 }
    mova    [dstq+wq], m0
    add            wq, mmsize
    jl .loop_%1
    REP_RET
%endmacro
; %1 = nr. of XMM registers
; %2 = yuyv or uyvy
; %3 = if specified, the aligned and unaligned loop code is identical
;      (i.e. YUYV+AVX), so we don't need to emit both an aligned and an
;      unaligned loop
%macro YUYV_TO_Y_FN 2-3
cglobal %2ToY, 5, 5, %1, dst, unused0, unused1, src, w
    lea          srcq, [srcq+wq*2]
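; build the 0x00ff word mask in two instructions instead of loading it from
; memory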
    pcmpeqb        m2, m2                 ; (byte) { 0xff } x 16
    psrlw          m2, 8                  ; (word) { 0x00ff } x 8
%endif ; mmsize == 8/16
%endmacro
; %1 = a (aligned) or u (unaligned)
; %2 = yuyv or uyvy
%macro LOOP_YUYV_TO_UV 2
.loop_%1:
%ifidn %2, yuyv
    mov%1          m0, [srcq+wq*4]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1          m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    psrlw          m0, 8                  ; (word) { U0, V0, ..., U3, V3 }
    psrlw          m1, 8                  ; (word) { U4, V4, ..., U7, V7 }
%else ; uyvy
%if cpuflag(avx)
    vpand          m0, m2, [srcq+wq*4]        ; (word) { U0, V0, ..., U3, V3 }
    vpand          m1, m2, [srcq+wq*4+mmsize] ; (word) { U4, V4, ..., U7, V7 }
%else
    mov%1          m0, [srcq+wq*4]        ; (byte) { U0, Y0, V0, Y1, ... }
    mov%1          m1, [srcq+wq*4+mmsize] ; (byte) { U4, Y8, V4, Y9, ... }
    pand           m0, m2                 ; (word) { U0, V0, ..., U3, V3 }
    pand           m1, m2                 ; (word) { U4, V4, ..., U7, V7 }
%endif ; cpuflag(avx)
%endif ; yuyv/uyvy
    packuswb       m0, m1                 ; (byte) { U0, V0, ..., U7, V7 }
    pand           m1, m0, m2             ; (word) { U0, U1, ..., U7 }
    psrlw          m0, 8                  ; (word) { V0, V1, ..., V7 }
%if mmsize == 16
    packuswb       m1, m0                 ; (byte) { U0, ... U7, V0, ... V7 }
    movh   [dstUq+wq], m1                 ; store U[0-7] from the low half
    movhps [dstVq+wq], m1                 ; store V[0-7] from the high half
%else ; mmsize == 8
    packuswb       m1, m1                 ; (byte) { U0, ... U3 }
    packuswb       m0, m0                 ; (byte) { V0, ... V3 }
    movh   [dstUq+wq], m1
    movh   [dstVq+wq], m0
%endif ; mmsize == 8/16
    add            wq, mmsize / 2
    jl .loop_%1
    REP_RET
%endmacro
; %1 = nr. of XMM registers
; %2 = yuyv or uyvy
; %3 = if specified, the aligned and unaligned loop code is identical
;      (i.e. UYVY+AVX), so we don't need to emit both an aligned and an
;      unaligned loop
%macro YUYV_TO_UV_FN 2-3
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
%if mmsize == 16 && %0 == 2
    test         srcq, 15
%endif ; mmsize == 16 && %0 == 2
    lea          srcq, [srcq+wq*4]
    pcmpeqb        m2, m2                 ; (byte) { 0xff } x 16
    psrlw          m2, 8                  ; (word) { 0x00ff } x 8
; NOTE: if uyvy+avx, u/a are identical
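; only one of the loop variants below runs per call: the alignment test on
; srcq above selects the aligned or the unaligned body, and each loop
; returns when done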
%if mmsize == 16 && %0 == 2
    LOOP_YUYV_TO_UV a, %2
    LOOP_YUYV_TO_UV u, %2
%else ; mmsize == 8 || %0 == 3
    LOOP_YUYV_TO_UV a, %2
%endif ; mmsize == 8/16
%endmacro
; %1 = a (aligned) or u (unaligned)
; %2 = nv12 or nv21
%macro LOOP_NVXX_TO_UV 2
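; the comments below assume NV12's U-first byte order; for NV21 the same
; code applies with the roles of U and V swapped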
.loop_%1:
    mov%1          m0, [srcq+wq*2]        ; (byte) { U0, V0, U1, V1, ... }
    mov%1          m1, [srcq+wq*2+mmsize] ; (byte) { U8, V8, U9, V9, ... }
    pand           m2, m0, m5             ; (word) { U0, U1, ..., U7 }
    pand           m3, m1, m5             ; (word) { U8, U9, ..., U15 }
    psrlw          m0, 8                  ; (word) { V0, V1, ..., V7 }
    psrlw          m1, 8                  ; (word) { V8, V9, ..., V15 }
    packuswb       m2, m3                 ; (byte) { U0, ..., U15 }
    packuswb       m0, m1                 ; (byte) { V0, ..., V15 }
    mova  [dstUq+wq], m2
    mova  [dstVq+wq], m0
    add            wq, mmsize
    jl .loop_%1
    REP_RET
%endmacro
; %1 = nr. of XMM registers
; %2 = nv12 or nv21
%macro NVXX_TO_UV_FN 2
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
    lea          srcq, [srcq+wq*2]
    pcmpeqb        m5, m5                 ; (byte) { 0xff } x 16
    psrlw          m5, 8                  ; (word) { 0x00ff } x 8
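; as in the YUYV functions above, the alignment of srcq decides at run time
; whether the aligned or the unaligned loop body executes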
%if mmsize == 16
    LOOP_NVXX_TO_UV a, %2
    LOOP_NVXX_TO_UV u, %2
%else ; mmsize == 8
    LOOP_NVXX_TO_UV a, %2
%endif ; mmsize == 8/16
%endmacro
INIT_MMX mmx
YUYV_TO_Y_FN  0, yuyv
YUYV_TO_Y_FN  0, uyvy
YUYV_TO_UV_FN 0, yuyv
YUYV_TO_UV_FN 0, uyvy
NVXX_TO_UV_FN 0, nv12
NVXX_TO_UV_FN 0, nv21

INIT_XMM sse2
YUYV_TO_Y_FN  3, yuyv
YUYV_TO_Y_FN  3, uyvy
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21

INIT_XMM avx
; in theory, we could write a yuy2-to-y using vpand (i.e. AVX), but
; that's not faster in practice
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy, 1
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21