;******************************************************************************
;* x86-optimized input routines; does shuffling of packed
;* YUV formats into individual planes, and converts RGB
;* into YUV planes also.
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"
rgb_Yrnd:        times 4 dd 0x84000     ;  16.5 << 15
rgb_UVrnd:       times 4 dd 0x404000    ; 128.5 << 15
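; A note on the rounding constants: they fold the +0.5 round-to-nearest term
; and the +16 (luma) / +128 (chroma) level offset into a single add, scaled by
; the 15-bit fixed-point factor of the coefficients, i.e. (assuming the final
; arithmetic shift in the loops below is by 15):
;    0x84000 = ( 16 + 0.5) << 15
;   0x404000 = (128 + 0.5) << 15
; so e.g. (B*BY + G*GY + R*RY + 0x84000) >> 15 is the rounded Y value plus 16.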
bgr_Ycoeff_12x4: times 2 dw BY, GY, 0, BY
bgr_Ycoeff_3x56: times 2 dw RY, 0, GY, RY
rgb_Ycoeff_12x4: times 2 dw RY, GY, 0, RY
rgb_Ycoeff_3x56: times 2 dw BY, 0, GY, BY
bgr_Ucoeff_12x4: times 2 dw BU, GU, 0, BU
bgr_Ucoeff_3x56: times 2 dw RU, 0, GU, RU
rgb_Ucoeff_12x4: times 2 dw RU, GU, 0, RU
rgb_Ucoeff_3x56: times 2 dw BU, 0, GU, BU
bgr_Vcoeff_12x4: times 2 dw BV, GV, 0, BV
bgr_Vcoeff_3x56: times 2 dw RV, 0, GV, RV
rgb_Vcoeff_12x4: times 2 dw RV, GV, 0, RV
rgb_Vcoeff_3x56: times 2 dw BV, 0, GV, BV
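; How the two tables per plane fit together (an explanatory note, inferred
; from the pmaddwd use in the loops below): the *_12x4 table multiplies the
; first staggered word view of four packed pixels (e.g. { B0, G0, R0, B1,
; B2, G2, R2, B3 }) and the *_3x56 table the second ({ R0, B1, G1, R1, R2,
; B3, G3, R3 }); the zero entries make sure no channel is counted twice, so
; adding the two pmaddwd results gives the full three-term dot product
; (B*BY + G*GY + R*RY, or the U/V equivalents) for every pixel.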
rgba_Ycoeff_rb:  times 4 dw RY, BY
rgba_Ycoeff_br:  times 4 dw BY, RY
rgba_Ycoeff_ga:  times 4 dw GY, 0
rgba_Ycoeff_ag:  times 4 dw 0, GY
rgba_Ucoeff_rb:  times 4 dw RU, BU
rgba_Ucoeff_br:  times 4 dw BU, RU
rgba_Ucoeff_ga:  times 4 dw GU, 0
rgba_Ucoeff_ag:  times 4 dw 0, GU
rgba_Vcoeff_rb:  times 4 dw RV, BV
rgba_Vcoeff_br:  times 4 dw BV, RV
rgba_Vcoeff_ga:  times 4 dw GV, 0
rgba_Vcoeff_ag:  times 4 dw 0, GV
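; The 32-bit path (see DEINTB in the loops below) effectively splits each
; 4-byte pixel into its 1st/3rd bytes and its 2nd/4th bytes as words, so e.g.
; for "rgba" input the _rb table weights the { R, B } pair and the _ga table
; weights the { G, A } pair with the alpha coefficient forced to 0; adding
; the two pmaddwd results again yields the complete per-pixel dot product.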
shuf_rgb_12x4:   db  0, 0x80,  1, 0x80,  2, 0x80,  3, 0x80, \
                     6, 0x80,  7, 0x80,  8, 0x80,  9, 0x80
shuf_rgb_3x56:   db  2, 0x80,  3, 0x80,  4, 0x80,  5, 0x80, \
                     8, 0x80,  9, 0x80, 10, 0x80, 11, 0x80
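; pshufb treats a source index with bit 7 set (0x80) as "write zero", so the
; two masks above gather the packed 24-bit pixels and zero-extend each byte
; to a word in one shuffle: shuf_rgb_12x4 picks bytes 0-3 and 6-9 of the
; load, shuf_rgb_3x56 picks bytes 2-5 and 8-11.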
;-----------------------------------------------------------------------------
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
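; As a rough scalar reference for what the conversions below compute per
; pixel (a sketch only; it assumes the RY/GY/BY etc. word constants are
; 15-bit fixed-point weights and that the final arithmetic shift is by 15):
;
;   dst[i]  = (B*BY + G*GY + R*RY + 0x84000)  >> 15;   // Y  +  16, clipped to a byte
;   dstU[i] = (B*BU + G*GU + R*RU + 0x404000) >> 15;   // U + 128, clipped to a byte
;   dstV[i] = (B*BV + G*GV + R*RV + 0x404000) >> 15;   // V + 128, clipped to a byte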
; %1 = nr. of XMM registers
%macro RGB24_TO_Y_FN 2-3
cglobal %2 %+ 24ToY, 3, 3, %1, dst, src, w
    mova       m5, [%2_Ycoeff_12x4]
    mova       m6, [%2_Ycoeff_3x56]
    mova       m8, [%2_Ycoeff_12x4]
    mova       m9, [%2_Ycoeff_3x56]
%else ; x86-32 && mmsize == 16
%define coeff1 [%2_Ycoeff_12x4]
%define coeff2 [%2_Ycoeff_3x56]
%endif ; x86-32/64 && mmsize == 8/16
%if (ARCH_X86_64 || mmsize == 8) && %0 == 3
    jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToY %+ SUFFIX).body
%else ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
    mova       m7, [shuf_rgb_12x4]
    mova       m10, [shuf_rgb_3x56]
%define shuf_rgb2 m10
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; cpuflag(ssse3)
%if notcpuflag(ssse3)
%endif ; !cpuflag(ssse3)
    movu       m0, [srcq+0]               ; (byte) { Bx, Gx, Rx }[0-3]
    movu       m2, [srcq+12]              ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb     m1, m0, shuf_rgb2          ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb     m0, shuf_rgb1              ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    pshufb     m3, m2, shuf_rgb2          ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb     m2, shuf_rgb1              ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd       m0, [srcq+0]               ; (byte) { B0, G0, R0, B1 }
    movd       m1, [srcq+2]               ; (byte) { R0, B1, G1, R1 }
    movd       m2, [srcq+6]               ; (byte) { B2, G2, R2, B3 }
    movd       m3, [srcq+8]               ; (byte) { R2, B3, G3, R3 }
%if mmsize == 16 ; i.e. sse2
    punpckldq  m0, m2                     ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq  m1, m3                     ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd       m2, [srcq+12]              ; (byte) { B4, G4, R4, B5 }
    movd       m3, [srcq+14]              ; (byte) { R4, B5, G5, R5 }
    movd       m5, [srcq+18]              ; (byte) { B6, G6, R6, B7 }
    movd       m6, [srcq+20]              ; (byte) { R6, B7, G7, R7 }
    punpckldq  m2, m5                     ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq  m3, m6                     ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16
    punpcklbw  m0, m7                     ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw  m1, m7                     ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    punpcklbw  m2, m7                     ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw  m3, m7                     ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add        srcq, 3 * mmsize / 2
    pmaddwd    m0, coeff1                 ; (dword) { B0*BY + G0*GY, B1*BY, B2*BY + G2*GY, B3*BY }
    pmaddwd    m1, coeff2                 ; (dword) { R0*RY, G1*GY + R1*RY, R2*RY, G3*GY + R3*RY }
    pmaddwd    m2, coeff1                 ; (dword) { B4*BY + G4*GY, B5*BY, B6*BY + G6*GY, B7*BY }
    pmaddwd    m3, coeff2                 ; (dword) { R4*RY, G5*GY + R5*RY, R6*RY, G7*GY + R7*RY }
    paddd      m0, m1                     ; (dword) { Bx*BY + Gx*GY + Rx*RY }[0-3]
    paddd      m2, m3                     ; (dword) { Bx*BY + Gx*GY + Rx*RY }[4-7]
    paddd      m0, m4                     ; += rgb_Yrnd, i.e. (dword) { Y[0-3] }
    paddd      m2, m4                     ; += rgb_Yrnd, i.e. (dword) { Y[4-7] }
    packssdw   m0, m2                     ; (word) { Y[0-7] }
    packuswb   m0, m0                     ; (byte) { Y[0-7] }
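    ; packssdw narrows the dword sums to words with signed saturation;
    ; packuswb then clamps them to the unsigned 0..255 byte range before the
    ; luma bytes are stored.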
%endif ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
; %1 = nr. of XMM registers
%macro RGB24_TO_UV_FN 2-3
cglobal %2 %+ 24ToUV, 3, 4, %1, dstU, dstV, src, w
    mova       m8, [%2_Ucoeff_12x4]
    mova       m9, [%2_Ucoeff_3x56]
    mova       m10, [%2_Vcoeff_12x4]
    mova       m11, [%2_Vcoeff_3x56]
%define coeffU1 [%2_Ucoeff_12x4]
%define coeffU2 [%2_Ucoeff_3x56]
%define coeffV1 [%2_Vcoeff_12x4]
%define coeffV2 [%2_Vcoeff_3x56]
%if ARCH_X86_64 && %0 == 3
    jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 3
    mova       m7, [shuf_rgb_12x4]
    mova       m12, [shuf_rgb_3x56]
%define shuf_rgb2 m12
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; cpuflag(ssse3)
%if notcpuflag(ssse3)
    movu       m0, [srcq+0]               ; (byte) { Bx, Gx, Rx }[0-3]
    movu       m4, [srcq+12]              ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb     m1, m0, shuf_rgb2          ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb     m0, shuf_rgb1              ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
%else ; !cpuflag(ssse3)
    movd       m0, [srcq+0]               ; (byte) { B0, G0, R0, B1 }
    movd       m1, [srcq+2]               ; (byte) { R0, B1, G1, R1 }
    movd       m4, [srcq+6]               ; (byte) { B2, G2, R2, B3 }
    movd       m5, [srcq+8]               ; (byte) { R2, B3, G3, R3 }
    punpckldq  m0, m4                     ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq  m1, m5                     ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd       m4, [srcq+12]              ; (byte) { B4, G4, R4, B5 }
    movd       m5, [srcq+14]              ; (byte) { R4, B5, G5, R5 }
%endif ; mmsize == 16
    punpcklbw  m0, m7                     ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw  m1, m7                     ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
%endif ; cpuflag(ssse3)
    pmaddwd    m2, m0, coeffV1            ; (dword) { B0*BV + G0*GV, B1*BV, B2*BV + G2*GV, B3*BV }
    pmaddwd    m3, m1, coeffV2            ; (dword) { R0*RV, G1*GV + R1*RV, R2*RV, G3*GV + R3*RV }
    pmaddwd    m0, coeffU1                ; (dword) { B0*BU + G0*GU, B1*BU, B2*BU + G2*GU, B3*BU }
    pmaddwd    m1, coeffU2                ; (dword) { R0*RU, G1*GU + R1*RU, R2*RU, G3*GU + R3*RU }
    paddd      m0, m1                     ; (dword) { Bx*BU + Gx*GU + Rx*RU }[0-3]
    paddd      m2, m3                     ; (dword) { Bx*BV + Gx*GV + Rx*RV }[0-3]
    pshufb     m5, m4, shuf_rgb2          ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb     m4, shuf_rgb1              ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd       m1, [srcq+18]              ; (byte) { B6, G6, R6, B7 }
    movd       m3, [srcq+20]              ; (byte) { R6, B7, G7, R7 }
    punpckldq  m4, m1                     ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq  m5, m3                     ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16 && !cpuflag(ssse3)
    punpcklbw  m4, m7                     ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw  m5, m7                     ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add        srcq, 3 * mmsize / 2
    pmaddwd    m1, m4, coeffU1            ; (dword) { B4*BU + G4*GU, B5*BU, B6*BU + G6*GU, B7*BU }
    pmaddwd    m3, m5, coeffU2            ; (dword) { R4*RU, G5*GU + R5*RU, R6*RU, G7*GU + R7*RU }
    pmaddwd    m4, coeffV1                ; (dword) { B4*BV + G4*GV, B5*BV, B6*BV + G6*GV, B7*BV }
    pmaddwd    m5, coeffV2                ; (dword) { R4*RV, G5*GV + R5*RV, R6*RV, G7*GV + R7*RV }
    paddd      m1, m3                     ; (dword) { Bx*BU + Gx*GU + Rx*RU }[4-7]
    paddd      m4, m5                     ; (dword) { Bx*BV + Gx*GV + Rx*RV }[4-7]
    paddd      m0, m6                     ; += rgb_UVrnd, i.e. (dword) { U[0-3] }
    paddd      m2, m6                     ; += rgb_UVrnd, i.e. (dword) { V[0-3] }
    paddd      m1, m6                     ; += rgb_UVrnd, i.e. (dword) { U[4-7] }
    paddd      m4, m6                     ; += rgb_UVrnd, i.e. (dword) { V[4-7] }
    packssdw   m0, m1                     ; (word) { U[0-7] }
    packssdw   m2, m4                     ; (word) { V[0-7] }
    packuswb   m0, m0                     ; (byte) { U[0-3] }
    packuswb   m2, m2                     ; (byte) { V[0-3] }
    packuswb   m0, m2                     ; (byte) { U[0-7], V[0-7] }
    movhps     [dstVq+wq], m0
%endif ; mmsize == 8/16
%endif ; ARCH_X86_64 && %0 == 3

; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
RGB24_TO_Y_FN %1, rgb
RGB24_TO_Y_FN %1, bgr, rgb
RGB24_TO_UV_FN %2, rgb
RGB24_TO_UV_FN %2, bgr, rgb
; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_Y_FN 5-6
cglobal %2%3%4%5 %+ ToY, 3, 3, %1, dst, src, w
    mova       m5, [rgba_Ycoeff_%2%4]
    mova       m6, [rgba_Ycoeff_%3%5]
    jmp mangle(private_prefix %+ _ %+ %6 %+ ToY %+ SUFFIX).body
    lea        srcq, [srcq+wq*4]
    psrlw      m7, 8                      ; (word) { 0x00ff } x4
    ; FIXME check alignment and use mova
    movu       m0, [srcq+wq*4+0]          ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu       m2, [srcq+wq*4+mmsize]     ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB     1, 0, 3, 2, 7              ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
    pmaddwd    m1, m5                     ; (dword) { Bx*BY + Rx*RY }[0-3]
    pmaddwd    m0, m6                     ; (dword) { Gx*GY }[0-3]
    pmaddwd    m3, m5                     ; (dword) { Bx*BY + Rx*RY }[4-7]
    pmaddwd    m2, m6                     ; (dword) { Gx*GY }[4-7]
    paddd      m0, m4                     ; += rgb_Yrnd
    paddd      m2, m4                     ; += rgb_Yrnd
    paddd      m0, m1                     ; (dword) { Y[0-3] }
    paddd      m2, m3                     ; (dword) { Y[4-7] }
    packssdw   m0, m2                     ; (word) { Y[0-7] }
    packuswb   m0, m0                     ; (byte) { Y[0-7] }
; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_UV_FN 5-6
cglobal %2%3%4%5 %+ ToUV, 3, 4, %1, dstU, dstV, src, w
    mova       m8, [rgba_Ucoeff_%2%4]
    mova       m9, [rgba_Ucoeff_%3%5]
    mova       m10, [rgba_Vcoeff_%2%4]
    mova       m11, [rgba_Vcoeff_%3%5]
%define coeffU1 [rgba_Ucoeff_%2%4]
%define coeffU2 [rgba_Ucoeff_%3%5]
%define coeffV1 [rgba_Vcoeff_%2%4]
%define coeffV2 [rgba_Vcoeff_%3%5]
%if ARCH_X86_64 && %0 == 6
    jmp mangle(private_prefix %+ _ %+ %6 %+ ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 6
    lea        srcq, [srcq+wq*4]
    psrlw      m7, 8                      ; (word) { 0x00ff } x4
    ; FIXME check alignment and use mova
    movu       m0, [srcq+wq*4+0]          ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu       m4, [srcq+wq*4+mmsize]     ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB     1, 0, 5, 4, 7              ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
    pmaddwd    m3, m1, coeffV1            ; (dword) { Bx*BV + Rx*RV }[0-3]
    pmaddwd    m2, m0, coeffV2            ; (dword) { Gx*GV }[0-3]
    pmaddwd    m1, coeffU1                ; (dword) { Bx*BU + Rx*RU }[0-3]
    pmaddwd    m0, coeffU2                ; (dword) { Gx*GU }[0-3]
    paddd      m3, m6                     ; += rgb_UVrnd
    paddd      m1, m6                     ; += rgb_UVrnd
    paddd      m2, m3                     ; (dword) { V[0-3] }
    paddd      m0, m1                     ; (dword) { U[0-3] }
    pmaddwd    m3, m5, coeffV1            ; (dword) { Bx*BV + Rx*RV }[4-7]
    pmaddwd    m1, m4, coeffV2            ; (dword) { Gx*GV }[4-7]
    pmaddwd    m5, coeffU1                ; (dword) { Bx*BU + Rx*RU }[4-7]
    pmaddwd    m4, coeffU2                ; (dword) { Gx*GU }[4-7]
    paddd      m3, m6                     ; += rgb_UVrnd
    paddd      m5, m6                     ; += rgb_UVrnd
    paddd      m1, m3                     ; (dword) { V[4-7] }
    paddd      m4, m5                     ; (dword) { U[4-7] }
    packssdw   m0, m4                     ; (word) { U[0-7] }
    packssdw   m2, m1                     ; (word) { V[0-7] }
    packuswb   m0, m0                     ; (byte) { U[0-7] }
    packuswb   m2, m2                     ; (byte) { V[0-7] }
    packuswb   m0, m2                     ; (byte) { U[0-7], V[0-7] }
    movhps     [dstVq+wq], m0
%endif ; mmsize == 8/16
%endif ; ARCH_X86_64 && %0 == 6
; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
RGB32_TO_Y_FN %1, r, g, b, a
RGB32_TO_Y_FN %1, b, g, r, a, rgba
RGB32_TO_Y_FN %1, a, r, g, b, rgba
RGB32_TO_Y_FN %1, a, b, g, r, rgba

RGB32_TO_UV_FN %2, r, g, b, a
RGB32_TO_UV_FN %2, b, g, r, a, rgba
RGB32_TO_UV_FN %2, a, r, g, b, rgba
RGB32_TO_UV_FN %2, a, b, g, r, rgba
;-----------------------------------------------------------------------------
; YUYV/UYVY/NV12/NV21 packed pixel shuffling.
;
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
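; These are pure byte-shuffling routines.  A scalar sketch of the yuyv case
; (for illustration only):
;
;   // yuyvToY:  src = Y0 U0 Y1 V0 Y2 U1 Y3 V1 ...
;   for (int i = 0; i < w; i++)
;       dst[i] = src[2 * i];
;   // yuyvToUV: one U/V byte pair per two luma samples
;   for (int i = 0; i < w; i++) {
;       dstU[i] = src[4 * i + 1];
;       dstV[i] = src[4 * i + 3];
;   }
;
; uyvy swaps the luma/chroma byte positions, and nv12/nv21 deinterleave a
; separate UV (or VU) plane in the same way.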
; %1 = a (aligned) or u (unaligned)
%macro LOOP_YUYV_TO_Y 2
    mov%1      m0, [srcq+wq*2]            ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1      m1, [srcq+wq*2+mmsize]     ; (byte) { Y8, U4, Y9, V4, ... }
    pand       m0, m2                     ; (word) { Y0, Y1, ..., Y7 }
    pand       m1, m2                     ; (word) { Y8, Y9, ..., Y15 }
    psrlw      m0, 8                      ; (word) { Y0, Y1, ..., Y7 }
    psrlw      m1, 8                      ; (word) { Y8, Y9, ..., Y15 }
    packuswb   m0, m1                     ; (byte) { Y0, ..., Y15 }
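    ; yuyv has its luma in the even bytes, so it keeps them with the 0x00ff
    ; word mask; uyvy has luma in the odd bytes and shifts them down instead.
    ; Both variants then pack back to bytes the same way.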
; %1 = nr. of XMM registers
; %3 = if specified, the unaligned and aligned code in the loop is identical
;      (i.e. YUYV+AVX), so we don't need to split the loop into an aligned
;      and an unaligned case
%macro YUYV_TO_Y_FN 2-3
cglobal %2ToY, 3, 3, %1, dst, src, w
    lea        srcq, [srcq+wq*2]
    pcmpeqb    m2, m2                     ; (byte) { 0xff } x 16
    psrlw      m2, 8                      ; (word) { 0x00ff } x 8
%endif ; mmsize == 8/16
; %1 = a (aligned) or u (unaligned)
%macro LOOP_YUYV_TO_UV 2
    mov%1      m0, [srcq+wq*4]            ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1      m1, [srcq+wq*4+mmsize]     ; (byte) { Y8, U4, Y9, V4, ... }
    psrlw      m0, 8                      ; (word) { U0, V0, ..., U3, V3 }
    psrlw      m1, 8                      ; (word) { U4, V4, ..., U7, V7 }
    vpand      m0, m2, [srcq+wq*4]        ; (word) { U0, V0, ..., U3, V3 }
    vpand      m1, m2, [srcq+wq*4+mmsize] ; (word) { U4, V4, ..., U7, V7 }
    mov%1      m0, [srcq+wq*4]            ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1      m1, [srcq+wq*4+mmsize]     ; (byte) { Y8, U4, Y9, V4, ... }
    pand       m0, m2                     ; (word) { U0, V0, ..., U3, V3 }
    pand       m1, m2                     ; (word) { U4, V4, ..., U7, V7 }
    packuswb   m0, m1                     ; (byte) { U0, V0, ..., U7, V7 }
    pand       m1, m0, m2                 ; (word) { U0, U1, ..., U7 }
    psrlw      m0, 8                      ; (word) { V0, V1, ..., V7 }
    packuswb   m1, m0                     ; (byte) { U0, ... U7, V0, ... V7 }
    movhps     [dstVq+wq], m1
    packuswb   m1, m1                     ; (byte) { U0, ... U3 }
    packuswb   m0, m0                     ; (byte) { V0, ... V3 }
%endif ; mmsize == 8/16
; %1 = nr. of XMM registers
; %3 = if specified, the unaligned and aligned code in the loop is identical
;      (i.e. UYVY+AVX), so we don't need to split the loop into an aligned
;      and an unaligned case
%macro YUYV_TO_UV_FN 2-3
cglobal %2ToUV, 3, 4, %1, dstU, dstV, src, w
%if mmsize == 16 && %0 == 2
    lea        srcq, [srcq+wq*4]
    pcmpeqb    m2, m2                     ; (byte) { 0xff } x 16
    psrlw      m2, 8                      ; (word) { 0x00ff } x 8
    ; NOTE: if uyvy+avx, u/a are identical
%if mmsize == 16 && %0 == 2
    LOOP_YUYV_TO_UV a, %2
    LOOP_YUYV_TO_UV u, %2
    LOOP_YUYV_TO_UV a, %2
%endif ; mmsize == 8/16
; %1 = a (aligned) or u (unaligned)
%macro LOOP_NVXX_TO_UV 2
    mov%1      m0, [srcq+wq*2]            ; (byte) { U0, V0, U1, V1, ... }
    mov%1      m1, [srcq+wq*2+mmsize]     ; (byte) { U8, V8, U9, V9, ... }
    pand       m2, m0, m4                 ; (word) { U0, U1, ..., U7 }
    pand       m3, m1, m4                 ; (word) { U8, U9, ..., U15 }
    psrlw      m0, 8                      ; (word) { V0, V1, ..., V7 }
    psrlw      m1, 8                      ; (word) { V8, V9, ..., V15 }
    packuswb   m2, m3                     ; (byte) { U0, ..., U15 }
    packuswb   m0, m1                     ; (byte) { V0, ..., V15 }
; %1 = nr. of XMM registers
%macro NVXX_TO_UV_FN 2
cglobal %2ToUV, 3, 4, %1, dstU, dstV, src, w
    lea        srcq, [srcq+wq*2]
    pcmpeqb    m4, m4                     ; (byte) { 0xff } x 16
    psrlw      m4, 8                      ; (word) { 0x00ff } x 8
    LOOP_NVXX_TO_UV a, %2
    LOOP_NVXX_TO_UV u, %2
    LOOP_NVXX_TO_UV a, %2
%endif ; mmsize == 8/16
YUYV_TO_UV_FN 0, yuyv
YUYV_TO_UV_FN 0, uyvy
NVXX_TO_UV_FN 0, nv12
NVXX_TO_UV_FN 0, nv21

YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21

; in theory, we could write a yuy2-to-y using vpand (i.e. AVX), but
; that's not faster in practice
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy, 1
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21