;******************************************************************************
;* x86-optimized input routines; does shuffling of packed
;* YUV formats into individual planes, and converts RGB
;* into YUV planes also.
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of Libav.
;*
;* Libav is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* Libav is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with Libav; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "x86util.asm"
rgb_Yrnd:        times 4 dd 0x80100   ;  16.5 << 15
rgb_UVrnd:       times 4 dd 0x400100  ; 128.5 << 15
bgr_Ycoeff_12x4: times 2 dw BY, GY, 0, BY
bgr_Ycoeff_3x56: times 2 dw RY, 0, GY, RY
rgb_Ycoeff_12x4: times 2 dw RY, GY, 0, RY
rgb_Ycoeff_3x56: times 2 dw BY, 0, GY, BY
bgr_Ucoeff_12x4: times 2 dw BU, GU, 0, BU
bgr_Ucoeff_3x56: times 2 dw RU, 0, GU, RU
rgb_Ucoeff_12x4: times 2 dw RU, GU, 0, RU
rgb_Ucoeff_3x56: times 2 dw BU, 0, GU, BU
bgr_Vcoeff_12x4: times 2 dw BV, GV, 0, BV
bgr_Vcoeff_3x56: times 2 dw RV, 0, GV, RV
rgb_Vcoeff_12x4: times 2 dw RV, GV, 0, RV
rgb_Vcoeff_3x56: times 2 dw BV, 0, GV, BV
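; Naming note (my reading of the tables above): a 2-pixel BGR/RGB group has 6
; components, and after the loads/shuffles in the loop one register holds
; components 1-4 and its partner components 3-6 as words.  The _12x4 rows
; weight components 1, 2 and 4 (x = zeroed lane), the _3x56 rows weight
; components 3, 5 and 6, so one pmaddwd with each row covers every coefficient
; of both pixels exactly once.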
rgba_Ycoeff_rb:  times 4 dw RY, BY
rgba_Ycoeff_br:  times 4 dw BY, RY
rgba_Ycoeff_ga:  times 4 dw GY, 0
rgba_Ycoeff_ag:  times 4 dw 0, GY
rgba_Ucoeff_rb:  times 4 dw RU, BU
rgba_Ucoeff_br:  times 4 dw BU, RU
rgba_Ucoeff_ga:  times 4 dw GU, 0
rgba_Ucoeff_ag:  times 4 dw 0, GU
rgba_Vcoeff_rb:  times 4 dw RV, BV
rgba_Vcoeff_br:  times 4 dw BV, RV
rgba_Vcoeff_ga:  times 4 dw GV, 0
rgba_Vcoeff_ag:  times 4 dw 0, GV
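; For the 32-bit formats, DEINTB in the loop splits each pixel into an
; even-byte word register and an odd-byte word register; the two-letter suffix
; (rb, br, ga, ag) names which components land in those lanes for a given byte
; order, and the alpha lane is always paired with a 0 coefficient.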
shuf_rgb_12x4:   db 0, 0x80, 1, 0x80,  2, 0x80,  3, 0x80, \
                    6, 0x80, 7, 0x80,  8, 0x80,  9, 0x80
shuf_rgb_3x56:   db 2, 0x80, 3, 0x80,  4, 0x80,  5, 0x80, \
                    8, 0x80, 9, 0x80, 10, 0x80, 11, 0x80
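; pshufb zeroes any destination byte whose selector has its high bit set, so
; the 0x80 entries act as "insert 0": these masks gather the packed 24-bit
; source bytes and zero-extend them to words in one step, replacing the
; movd + punpcklbw sequence used on the non-SSSE3 path below.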
;-----------------------------------------------------------------------------
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
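; Both kinds of functions compute a fixed-point weighted sum per pixel.  As a
; rough scalar sketch (the coefficient scale and the final shift S used by the
; full loop bodies are defined outside this excerpt, so treat the exact values
; as assumptions):
;
;   dst[i]  = (B*BY + G*GY + R*RY + rgb_Yrnd)  >> S;
;   dstU[i] = (B*BU + G*GU + R*RU + rgb_UVrnd) >> S;
;   dstV[i] = (B*BV + G*GV + R*RV + rgb_UVrnd) >> S;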
; %1 = nr. of XMM registers
%macro RGB24_TO_Y_FN 2-3
cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, u3
    mova m5, [%2_Ycoeff_12x4]
    mova m6, [%2_Ycoeff_3x56]
    mova m8, [%2_Ycoeff_12x4]
    mova m9, [%2_Ycoeff_3x56]
%else ; x86-32 && mmsize == 16
%define coeff1 [%2_Ycoeff_12x4]
%define coeff2 [%2_Ycoeff_3x56]
%endif ; x86-32/64 && mmsize == 8/16
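; When %3 is given (the bgr variants), the function only sets up its own
; coefficient tables and then tail-calls the shared .body of the %3 (rgb)
; version where the %if below allows it; the UV and RGB32 macros use the
; same scheme.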
%if (ARCH_X86_64 || mmsize == 8) && %0 == 3
    jmp mangle(program_name %+ _ %+ %3 %+ 24ToY %+ SUFFIX).body
%else ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
    mova m7, [shuf_rgb_12x4]
    mova m10, [shuf_rgb_3x56]
%define shuf_rgb2 m10
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; cpuflag(ssse3)
%if notcpuflag(ssse3)
%endif ; !cpuflag(ssse3)
    movu m0, [srcq+0]           ; (byte) { Bx, Gx, Rx }[0-3]
    movu m2, [srcq+12]          ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb m1, m0, shuf_rgb2    ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb m0, shuf_rgb1        ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    pshufb m3, m2, shuf_rgb2    ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb m2, shuf_rgb1        ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd m0, [srcq+0]           ; (byte) { B0, G0, R0, B1 }
    movd m1, [srcq+2]           ; (byte) { R0, B1, G1, R1 }
    movd m2, [srcq+6]           ; (byte) { B2, G2, R2, B3 }
    movd m3, [srcq+8]           ; (byte) { R2, B3, G3, R3 }
%if mmsize == 16 ; i.e. sse2
    punpckldq m0, m2            ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq m1, m3            ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd m2, [srcq+12]          ; (byte) { B4, G4, R4, B5 }
    movd m3, [srcq+14]          ; (byte) { R4, B5, G5, R5 }
    movd m5, [srcq+18]          ; (byte) { B6, G6, R6, B7 }
    movd m6, [srcq+20]          ; (byte) { R6, B7, G7, R7 }
    punpckldq m2, m5            ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq m3, m6            ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16
    punpcklbw m0, m7            ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw m1, m7            ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    punpcklbw m2, m7            ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw m3, m7            ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add srcq, 3 * mmsize / 2
    pmaddwd m0, coeff1          ; (dword) { B0*BY + G0*GY, B1*BY, B2*BY + G2*GY, B3*BY }
    pmaddwd m1, coeff2          ; (dword) { R0*RY, G1*GY + R1*RY, R2*RY, G3*GY + R3*RY }
    pmaddwd m2, coeff1          ; (dword) { B4*BY + G4*GY, B5*BY, B6*BY + G6*GY, B7*BY }
    pmaddwd m3, coeff2          ; (dword) { R4*RY, G5*GY + R5*RY, R6*RY, G7*GY + R7*RY }
    paddd m0, m1                ; (dword) { Bx*BY + Gx*GY + Rx*RY }[0-3]
    paddd m2, m3                ; (dword) { Bx*BY + Gx*GY + Rx*RY }[4-7]
    paddd m0, m4                ; += rgb_Yrnd, i.e. (dword) { Y[0-3] }
    paddd m2, m4                ; += rgb_Yrnd, i.e. (dword) { Y[4-7] }
    packssdw m0, m2             ; (word) { Y[0-7] }
%endif ; (ARCH_X86_64 || mmsize == 8) && %0 == 3
; %1 = nr. of XMM registers
%macro RGB24_TO_UV_FN 2-3
cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, u3
    mova m8, [%2_Ucoeff_12x4]
    mova m9, [%2_Ucoeff_3x56]
    mova m10, [%2_Vcoeff_12x4]
    mova m11, [%2_Vcoeff_3x56]
%define coeffU1 [%2_Ucoeff_12x4]
%define coeffU2 [%2_Ucoeff_3x56]
%define coeffV1 [%2_Vcoeff_12x4]
%define coeffV2 [%2_Vcoeff_3x56]
%if ARCH_X86_64 && %0 == 3
    jmp mangle(program_name %+ _ %+ %3 %+ 24ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 3
    mova m7, [shuf_rgb_12x4]
    mova m12, [shuf_rgb_3x56]
%define shuf_rgb2 m12
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; cpuflag(ssse3)
%if notcpuflag(ssse3)
    movu m0, [srcq+0]           ; (byte) { Bx, Gx, Rx }[0-3]
    movu m4, [srcq+12]          ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb m1, m0, shuf_rgb2    ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb m0, shuf_rgb1        ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
%else ; !cpuflag(ssse3)
    movd m0, [srcq+0]           ; (byte) { B0, G0, R0, B1 }
    movd m1, [srcq+2]           ; (byte) { R0, B1, G1, R1 }
    movd m4, [srcq+6]           ; (byte) { B2, G2, R2, B3 }
    movd m5, [srcq+8]           ; (byte) { R2, B3, G3, R3 }
    punpckldq m0, m4            ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq m1, m5            ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd m4, [srcq+12]          ; (byte) { B4, G4, R4, B5 }
    movd m5, [srcq+14]          ; (byte) { R4, B5, G5, R5 }
%endif ; mmsize == 16
    punpcklbw m0, m7            ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw m1, m7            ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
%endif ; cpuflag(ssse3)
    pmaddwd m2, m0, coeffV1     ; (dword) { B0*BV + G0*GV, B1*BV, B2*BV + G2*GV, B3*BV }
    pmaddwd m3, m1, coeffV2     ; (dword) { R0*RV, G1*GV + R1*RV, R2*RV, G3*GV + R3*RV }
    pmaddwd m0, coeffU1         ; (dword) { B0*BU + G0*GU, B1*BU, B2*BU + G2*GU, B3*BU }
    pmaddwd m1, coeffU2         ; (dword) { R0*RU, G1*GU + R1*RU, R2*RU, G3*GU + R3*RU }
    paddd m0, m1                ; (dword) { Bx*BU + Gx*GU + Rx*RU }[0-3]
    paddd m2, m3                ; (dword) { Bx*BV + Gx*GV + Rx*RV }[0-3]
    pshufb m5, m4, shuf_rgb2    ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb m4, shuf_rgb1        ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd m1, [srcq+18]          ; (byte) { B6, G6, R6, B7 }
    movd m3, [srcq+20]          ; (byte) { R6, B7, G7, R7 }
    punpckldq m4, m1            ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq m5, m3            ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; mmsize == 16 && !cpuflag(ssse3)
    punpcklbw m4, m7            ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw m5, m7            ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add srcq, 3 * mmsize / 2
    pmaddwd m1, m4, coeffU1     ; (dword) { B4*BU + G4*GU, B5*BU, B6*BU + G6*GU, B7*BU }
    pmaddwd m3, m5, coeffU2     ; (dword) { R4*RU, G5*GU + R5*RU, R6*RU, G7*GU + R7*RU }
    pmaddwd m4, coeffV1         ; (dword) { B4*BV + G4*GV, B5*BV, B6*BV + G6*GV, B7*BV }
    pmaddwd m5, coeffV2         ; (dword) { R4*RV, G5*GV + R5*RV, R6*RV, G7*GV + R7*RV }
    paddd m1, m3                ; (dword) { Bx*BU + Gx*GU + Rx*RU }[4-7]
    paddd m4, m5                ; (dword) { Bx*BV + Gx*GV + Rx*RV }[4-7]
    paddd m0, m6                ; += rgb_UVrnd, i.e. (dword) { U[0-3] }
    paddd m2, m6                ; += rgb_UVrnd, i.e. (dword) { V[0-3] }
    paddd m1, m6                ; += rgb_UVrnd, i.e. (dword) { U[4-7] }
    paddd m4, m6                ; += rgb_UVrnd, i.e. (dword) { V[4-7] }
    packssdw m0, m1             ; (word) { U[0-7] }
    packssdw m2, m4             ; (word) { V[0-7] }
%endif ; mmsize == 8/16
%endif ; ARCH_X86_64 && %0 == 3
; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
RGB24_TO_Y_FN %1, rgb
RGB24_TO_Y_FN %1, bgr, rgb
RGB24_TO_UV_FN %2, rgb
RGB24_TO_UV_FN %2, bgr, rgb
; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_Y_FN 5-6
cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, u3
    mova m5, [rgba_Ycoeff_%2%4]
    mova m6, [rgba_Ycoeff_%3%5]
    jmp mangle(program_name %+ _ %+ %6 %+ ToY %+ SUFFIX).body
    lea srcq, [srcq+wq*4]
    psrlw m7, 8                 ; (word) { 0x00ff } x4
    ; FIXME check alignment and use mova
    movu m0, [srcq+wq*2+0]      ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu m2, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB 1, 0, 3, 2, 7        ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
    pmaddwd m1, m5              ; (dword) { Bx*BY + Rx*RY }[0-3]
    pmaddwd m0, m6              ; (dword) { Gx*GY }[0-3]
    pmaddwd m3, m5              ; (dword) { Bx*BY + Rx*RY }[4-7]
    pmaddwd m2, m6              ; (dword) { Gx*GY }[4-7]
    paddd m0, m4                ; += rgb_Yrnd
    paddd m2, m4                ; += rgb_Yrnd
    paddd m0, m1                ; (dword) { Y[0-3] }
    paddd m2, m3                ; (dword) { Y[4-7] }
    packssdw m0, m2             ; (word) { Y[0-7] }
; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
%macro RGB32_TO_UV_FN 5-6
cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, u3
    mova m8, [rgba_Ucoeff_%2%4]
    mova m9, [rgba_Ucoeff_%3%5]
    mova m10, [rgba_Vcoeff_%2%4]
    mova m11, [rgba_Vcoeff_%3%5]
%define coeffU1 [rgba_Ucoeff_%2%4]
%define coeffU2 [rgba_Ucoeff_%3%5]
%define coeffV1 [rgba_Vcoeff_%2%4]
%define coeffV2 [rgba_Vcoeff_%3%5]
%if ARCH_X86_64 && %0 == 6
    jmp mangle(program_name %+ _ %+ %6 %+ ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 6
    lea srcq, [srcq+wq*2]
    psrlw m7, 8                 ; (word) { 0x00ff } x4
    ; FIXME check alignment and use mova
    movu m0, [srcq+wq*2+0]      ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu m4, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB 1, 0, 5, 4, 7        ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
    pmaddwd m3, m1, coeffV1     ; (dword) { Bx*BV + Rx*RV }[0-3]
    pmaddwd m2, m0, coeffV2     ; (dword) { Gx*GV }[0-3]
    pmaddwd m1, coeffU1         ; (dword) { Bx*BU + Rx*RU }[0-3]
    pmaddwd m0, coeffU2         ; (dword) { Gx*GU }[0-3]
    paddd m3, m6                ; += rgb_UVrnd
    paddd m1, m6                ; += rgb_UVrnd
    paddd m2, m3                ; (dword) { V[0-3] }
    paddd m0, m1                ; (dword) { U[0-3] }
    pmaddwd m3, m5, coeffV1     ; (dword) { Bx*BV + Rx*RV }[4-7]
    pmaddwd m1, m4, coeffV2     ; (dword) { Gx*GV }[4-7]
    pmaddwd m5, coeffU1         ; (dword) { Bx*BU + Rx*RU }[4-7]
    pmaddwd m4, coeffU2         ; (dword) { Gx*GU }[4-7]
    paddd m3, m6                ; += rgb_UVrnd
    paddd m5, m6                ; += rgb_UVrnd
    paddd m1, m3                ; (dword) { V[4-7] }
    paddd m4, m5                ; (dword) { U[4-7] }
    packssdw m0, m4             ; (word) { U[0-7] }
    packssdw m2, m1             ; (word) { V[0-7] }
%endif ; mmsize == 8/16
%endif ; ARCH_X86_64 && %0 == 6
; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
RGB32_TO_Y_FN %1, r, g, b, a
RGB32_TO_Y_FN %1, b, g, r, a, rgba
RGB32_TO_Y_FN %1, a, r, g, b, rgba
RGB32_TO_Y_FN %1, a, b, g, r, rgba
RGB32_TO_UV_FN %2, r, g, b, a
RGB32_TO_UV_FN %2, b, g, r, a, rgba
RGB32_TO_UV_FN %2, a, r, g, b, rgba
RGB32_TO_UV_FN %2, a, b, g, r, rgba
;-----------------------------------------------------------------------------
; YUYV/UYVY/NV12/NV21 packed pixel shuffling.
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
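; These are pure byte shuffles.  As a scalar sketch of what the loops below do
; (per-pair layouts: YUYV = Y0 U Y1 V, UYVY = U Y0 V Y1, NV12/NV21 = the chroma
; plane with U,V resp. V,U interleaved):
;
;   yuyvToY:  dst[i]  = src[2*i];        uyvyToY: dst[i] = src[2*i+1];
;   yuyvToUV: dstU[i] = src[4*i+1];      dstV[i] = src[4*i+3];
;   nv12ToUV: dstU[i] = src[2*i];        dstV[i] = src[2*i+1];   (nv21: swapped)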
; %1 = a (aligned) or u (unaligned)
%macro LOOP_YUYV_TO_Y 2
    mov%1 m0, [srcq+wq*2]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1 m1, [srcq+wq*2+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    pand m0, m2                  ; (word) { Y0, Y1, ..., Y7 }
    pand m1, m2                  ; (word) { Y8, Y9, ..., Y15 }
    psrlw m0, 8                  ; (word) { Y0, Y1, ..., Y7 }
    psrlw m1, 8                  ; (word) { Y8, Y9, ..., Y15 }
    packuswb m0, m1              ; (byte) { Y0, ..., Y15 }
; %1 = nr. of XMM registers
; %3 = if specified, it means that unaligned and aligned code in loop
;      will be the same (i.e. YUYV+AVX), and thus we don't need to
;      split the loop in an aligned and unaligned case
%macro YUYV_TO_Y_FN 2-3
cglobal %2ToY, 5, 5, %1, dst, unused0, unused1, src, w
    lea srcq, [srcq+wq*2]
    pcmpeqb m2, m2               ; (byte) { 0xff } x 16
    psrlw m2, 8                  ; (word) { 0x00ff } x 8
%endif ; mmsize == 8/16
; %1 = a (aligned) or u (unaligned)
%macro LOOP_YUYV_TO_UV 2
    mov%1 m0, [srcq+wq*4]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1 m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    psrlw m0, 8                  ; (word) { U0, V0, ..., U3, V3 }
    psrlw m1, 8                  ; (word) { U4, V4, ..., U7, V7 }
    vpand m0, m2, [srcq+wq*4]    ; (word) { U0, V0, ..., U3, V3 }
    vpand m1, m2, [srcq+wq*4+mmsize] ; (word) { U4, V4, ..., U7, V7 }
    mov%1 m0, [srcq+wq*4]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1 m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    pand m0, m2                  ; (word) { U0, V0, ..., U3, V3 }
    pand m1, m2                  ; (word) { U4, V4, ..., U7, V7 }
    packuswb m0, m1              ; (byte) { U0, V0, ..., U7, V7 }
    pand m1, m0, m2              ; (word) { U0, U1, ..., U7 }
    psrlw m0, 8                  ; (word) { V0, V1, ..., V7 }
    packuswb m1, m0              ; (byte) { U0, ... U7, V0, ... V7 }
    movhps [dstVq+wq], m1
    packuswb m1, m1              ; (byte) { U0, ... U3 }
    packuswb m0, m0              ; (byte) { V0, ... V3 }
%endif ; mmsize == 8/16
; %1 = nr. of XMM registers
; %3 = if specified, it means that unaligned and aligned code in loop
;      will be the same (i.e. UYVY+AVX), and thus we don't need to
;      split the loop in an aligned and unaligned case
%macro YUYV_TO_UV_FN 2-3
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
%if mmsize == 16 && %0 == 2
    lea srcq, [srcq+wq*4]
    pcmpeqb m2, m2               ; (byte) { 0xff } x 16
    psrlw m2, 8                  ; (word) { 0x00ff } x 8
    ; NOTE: if uyvy+avx, u/a are identical
%if mmsize == 16 && %0 == 2
    LOOP_YUYV_TO_UV a, %2
    LOOP_YUYV_TO_UV u, %2
    LOOP_YUYV_TO_UV a, %2
%endif ; mmsize == 8/16
; %1 = a (aligned) or u (unaligned)
%macro LOOP_NVXX_TO_UV 2
    mov%1 m0, [srcq+wq*2]        ; (byte) { U0, V0, U1, V1, ... }
    mov%1 m1, [srcq+wq*2+mmsize] ; (byte) { U8, V8, U9, V9, ... }
    pand m2, m0, m5              ; (word) { U0, U1, ..., U7 }
    pand m3, m1, m5              ; (word) { U8, U9, ..., U15 }
    psrlw m0, 8                  ; (word) { V0, V1, ..., V7 }
    psrlw m1, 8                  ; (word) { V8, V9, ..., V15 }
    packuswb m2, m3              ; (byte) { U0, ..., U15 }
    packuswb m0, m1              ; (byte) { V0, ..., V15 }
; %1 = nr. of XMM registers
%macro NVXX_TO_UV_FN 2
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
    lea srcq, [srcq+wq*2]
    pcmpeqb m5, m5               ; (byte) { 0xff } x 16
    psrlw m5, 8                  ; (word) { 0x00ff } x 8
    LOOP_NVXX_TO_UV a, %2
    LOOP_NVXX_TO_UV u, %2
    LOOP_NVXX_TO_UV a, %2
%endif ; mmsize == 8/16
YUYV_TO_UV_FN 0, yuyv
YUYV_TO_UV_FN 0, uyvy
NVXX_TO_UV_FN 0, nv12
NVXX_TO_UV_FN 0, nv21
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21
; in theory, we could write a yuy2-to-y using vpand (i.e. AVX), but
; that's not faster in practice
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy, 1
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21