;*****************************************************************************
;* x86util.asm: x86 utility macros
;*****************************************************************************
;* Copyright (C) 2008-2010 x264 project
;*
;* Authors: Holger Lubitz <holger@lubitz.org>
;*          Loren Merritt <lorenm@u.washington.edu>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************
%assign FENC_STRIDE 16
%assign FDEC_STRIDE 32
%assign SIZEOF_PIXEL 1
%assign SIZEOF_DCTCOEF 2
%ifdef X264_HIGH_BIT_DEPTH
%assign SIZEOF_PIXEL 2
%assign SIZEOF_DCTCOEF 4
%endif

%assign PIXEL_MAX ((1 << BIT_DEPTH)-1)
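; e.g. PIXEL_MAX evaluates to 255 for BIT_DEPTH=8 and 1023 for BIT_DEPTH=10.

; The transpose macros below build on the 2-register interleave helper
; SBUTTERFLY defined earlier in the file; for reference, a minimal sketch
; of the usual definition (assuming x86inc's SWAP macro):
;     %macro SBUTTERFLY 4 ; unpack size, reg1, reg2, tmp
;         mova       m%4, m%2
;         punpckl%1  m%2, m%3
;         punpckh%1  m%4, m%3
;         SWAP %3, %4
;     %endmacro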
%macro TRANSPOSE4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SWAP %2, %3
%endmacro
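; Usage sketch (hypothetical register numbers): transpose a 4x4 block of
; words held in m0..m3, with m4 as scratch:
;     TRANSPOSE4x4W 0, 1, 2, 3, 4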
%macro TRANSPOSE2x4x4W 5
    SBUTTERFLY wd,  %1, %2, %5
    SBUTTERFLY wd,  %3, %4, %5
    SBUTTERFLY dq,  %1, %3, %5
    SBUTTERFLY dq,  %2, %4, %5
    SBUTTERFLY qdq, %1, %2, %5
    SBUTTERFLY qdq, %3, %4, %5
%endmacro
%macro TRANSPOSE4x4D 5
    SBUTTERFLY dq,  %1, %2, %5
    SBUTTERFLY dq,  %3, %4, %5
    SBUTTERFLY qdq, %1, %3, %5
    SBUTTERFLY qdq, %2, %4, %5
    SWAP %2, %3
%endmacro
%macro TRANSPOSE8x8W 9-11
%ifdef ARCH_X86_64
    SBUTTERFLY wd,  %1, %2, %9
    SBUTTERFLY wd,  %3, %4, %9
    SBUTTERFLY wd,  %5, %6, %9
    SBUTTERFLY wd,  %7, %8, %9
    SBUTTERFLY dq,  %1, %3, %9
    SBUTTERFLY dq,  %2, %4, %9
    SBUTTERFLY dq,  %5, %7, %9
    SBUTTERFLY dq,  %6, %8, %9
    SBUTTERFLY qdq, %1, %5, %9
    SBUTTERFLY qdq, %2, %6, %9
    SBUTTERFLY qdq, %3, %7, %9
    SBUTTERFLY qdq, %4, %8, %9
    SWAP %2, %5
    SWAP %4, %7
%else
; in:  m0..m7, unless %11 in which case m6 is in %9
; out: m0..m7, unless %11 in which case m4 is in %10
; spills into %9 and %10
%if %0<11
    movdqa %9, m%7
%endif
    SBUTTERFLY wd,  %1, %2, %7
    movdqa %10, m%2
    movdqa m%7, %9
    SBUTTERFLY wd,  %3, %4, %2
    SBUTTERFLY wd,  %5, %6, %2
    SBUTTERFLY wd,  %7, %8, %2
    SBUTTERFLY dq,  %1, %3, %2
    movdqa %9, m%3
    movdqa m%2, %10
    SBUTTERFLY dq,  %2, %4, %3
    SBUTTERFLY dq,  %5, %7, %3
    SBUTTERFLY dq,  %6, %8, %3
    SBUTTERFLY qdq, %1, %5, %3
    SBUTTERFLY qdq, %2, %6, %3
    movdqa %10, m%2
    movdqa m%3, %9
    SBUTTERFLY qdq, %3, %7, %2
    SBUTTERFLY qdq, %4, %8, %2
    SWAP %2, %5
    SWAP %4, %7
%if %0<11
    movdqa m%5, %10
%endif
%endif
%endmacro
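; Usage sketch (hypothetical register numbers): transpose an 8x8 word
; matrix held in m0..m7; on x86-64, m8 serves as the scratch register,
; while the 32-bit path spills through the %9/%10 slots instead:
;     TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8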
%macro ABS1_MMX 2    ; a, tmp
    pxor    %2, %2
    psubw   %2, %1
    pmaxsw  %1, %2
%endmacro
%macro ABS2_MMX 4    ; a, b, tmp0, tmp1
    pxor    %3, %3
    pxor    %4, %4
    psubw   %3, %1
    psubw   %4, %2
    pmaxsw  %1, %3
    pmaxsw  %2, %4
%endmacro
%define ABS1 ABS1_MMX
%define ABS2 ABS2_MMX
%define ABSB ABSB_MMX
%define ABSB2 ABSB2_MMX
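; Usage sketch (hypothetical registers): wordwise absolute values of m0
; and m1 with m2/m3 as scratch, via whichever implementation ABS2 is
; currently bound to:
;     ABS2 m0, m1, m2, m3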
%macro SPLATB_MMX 3
    movd      %1, [%2-3] ;to avoid crossing a cacheline
    punpcklbw %1, %1
%if mmsize==16
    pshuflw   %1, %1, 0xff
    punpcklqdq %1, %1
%else
    pshufw    %1, %1, 0xff
%endif
%endmacro
%macro SPLATB_SSSE3 3
    movd      %1, [%2-3]
    pshufb    %1, %3
%endmacro
%macro PALIGNR_SSSE3 4
    palignr %1, %2, %3
%endmacro
%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
%ifnum %5
    mova   m%1, m%5
    mova   m%3, m%5
%else
    mova   m%1, %5
    mova   m%3, m%1
%endif
    pand   m%1, m%2 ; dst .. y6 .. y4
    pand   m%3, m%4 ; src .. y6 .. y4
    psrlw  m%2, 8   ; dst .. y7 .. y5
    psrlw  m%4, 8   ; src .. y7 .. y5
%endmacro
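; Usage sketch (hypothetical arguments): deinterleave the bytes of m0/m1:
; even bytes land in m2/m3, odd bytes stay in m0/m1 (shifted down), with
; the 00ff word mask preloaded in m7:
;     DEINTB 2, 0, 3, 1, 7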
%macro SUMSUB_BADC 4-5
%if %0==5
    SUMSUB_BA %1, %2, %5
    SUMSUB_BA %3, %4, %5
%else
    paddw %1, %2
    paddw %3, %4
    paddw %2, %2
    paddw %4, %4
    psubw %2, %1
    psubw %4, %3
%endif
%endmacro
%macro HADAMARD4_V 4+
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %1, %3, %2, %4
%endmacro
%macro HADAMARD8_V 8+
    SUMSUB_BADC %1, %2, %3, %4
    SUMSUB_BADC %5, %6, %7, %8
    SUMSUB_BADC %1, %3, %2, %4
    SUMSUB_BADC %5, %7, %6, %8
    SUMSUB_BADC %1, %5, %2, %6
    SUMSUB_BADC %3, %7, %4, %8
%endmacro
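; Usage sketch (hypothetical registers): one vertical 4-point Hadamard on
; rows held in m0..m3 (two sum/diff stages, rows left in butterfly order):
;     HADAMARD4_V m0, m1, m2, m3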
%macro TRANS_SSE2 5-6
; TRANSPOSE2x2
; %1: transpose width (d/q) - use SBUTTERFLY qdq for dq
; %2: ord/unord (for compat with sse4, unused)
; %3/%4: source regs
; %5/%6: tmp regs
%ifidn %1, d
%define mask [mask_10]
%define shift 16
%elifidn %1, q
%define mask [mask_1100]
%define shift 32
%endif
%if %0==6 ; less dependency if we have two tmp
    mova   m%5, mask   ; ff00
    mova   m%6, m%4    ; x5x4
    psll%1 m%4, shift  ; x4..
    pand   m%6, m%5    ; x5..
    pandn  m%5, m%3    ; ..x0
    psrl%1 m%3, shift  ; ..x1
    por    m%4, m%5    ; x4x0
    por    m%3, m%6    ; x5x1
%else ; more dependency, one insn less. sometimes faster, sometimes not
    mova   m%5, m%4    ; x5x4
    psll%1 m%4, shift  ; x4..
    pxor   m%4, m%3    ; (x4^x1)x0
    pand   m%4, mask   ; (x4^x1)..
    pxor   m%3, m%4    ; x4x0
    psrl%1 m%4, shift  ; ..(x1^x4)
    pxor   m%5, m%4    ; x5x1
    SWAP   %4, %5
%endif
%endmacro
%macro TRANS_SSE4 5-6 ; see above
%ifidn %1, d
    mova   m%5, m%3
%ifidn %2, ord
    psrl%1 m%3, 16
%endif
    pblendw m%3, m%4, 10101010b
    psll%1 m%4, 16
%ifidn %2, ord
    pblendw m%4, m%5, 01010101b
%else
    psrl%1 m%5, 16
    por    m%4, m%5
%endif
%elifidn %1, q
    mova   m%5, m%3
    shufps m%3, m%4, 10001000b
    shufps m%5, m%4, 11011101b
    SWAP   %4, %5
%endif
%endmacro
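; Usage sketch (hypothetical registers): 2x2 transpose of dword-sized
; elements across m0/m1 using m2/m3 as scratch; callers are expected to
; %define TRANS to TRANS_SSE2 or TRANS_SSE4 to match the target cpu:
;     TRANS d, ord, 0, 1, 2, 3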
%macro HADAMARD 5-6
; %1=distance in words (0 for vertical pass, 1/2/4 for horizontal passes)
; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
; %3/%4: regs
; %5(%6): tmpregs
%if %1!=0 ; have to reorder stuff for horizontal op
    %ifidn %2, sumsub
        %define ORDER ord
        ; sumsub needs order because a-b != b-a unless a=b
    %else
        %define ORDER unord
        ; if we just max, order doesn't matter (allows pblendw+or in sse4)
    %endif
    %if %1==1
        TRANS d, ORDER, %3, %4, %5, %6
    %elif %1==2
        %if mmsize==8
            SBUTTERFLY dq, %3, %4, %5
        %else
            TRANS q, ORDER, %3, %4, %5, %6
        %endif
    %elif %1==4
        SBUTTERFLY qdq, %3, %4, %5
    %endif
%endif
%ifidn %2, sumsub
    SUMSUB_BA m%3, m%4, m%5
%else
    %ifidn %2, amax
        %if %0==6
            ABS2 m%3, m%4, m%5, m%6
        %else
            ABS1 m%3, m%5
            ABS1 m%4, m%5
        %endif
    %endif
    pmaxsw m%3, m%4
%endif
%endmacro
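; Usage sketch (hypothetical registers): sum/difference of m0/m1 over a
; horizontal distance of 2 words, with m2/m3 as scratch:
;     HADAMARD 2, sumsub, 0, 1, 2, 3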
%macro HADAMARD2_2D 6-7 sumsub
    HADAMARD 0, sumsub, %1, %2, %5
    HADAMARD 0, sumsub, %3, %4, %5
    SBUTTERFLY %6, %1, %2, %5
%ifnum %7
    HADAMARD 0, amax, %1, %2, %5, %7
%else
    HADAMARD 0, %7, %1, %2, %5
%endif
    SBUTTERFLY %6, %3, %4, %5
%ifnum %7
    HADAMARD 0, amax, %3, %4, %5, %7
%else
    HADAMARD 0, %7, %3, %4, %5
%endif
%endmacro
%macro HADAMARD4_2D 5-6 sumsub
    HADAMARD2_2D %1, %2, %3, %4, %5, wd
    HADAMARD2_2D %1, %3, %2, %4, %5, dq, %6
    SWAP %2, %3
%endmacro
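; Usage sketch (hypothetical registers): full 4x4 Hadamard of the words
; in m0..m3 with m4 as scratch; pass amax to accumulate absolute values
; for a SATD-style sum:
;     HADAMARD4_2D 0, 1, 2, 3, 4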
%macro HADAMARD4_2D_SSE 5-6 sumsub
    HADAMARD  0, sumsub, %1, %2, %5 ; 1st V row 0 + 1
    HADAMARD  0, sumsub, %3, %4, %5 ; 1st V row 2 + 3
    SBUTTERFLY   wd, %1, %2, %5     ; %1: m0 1+0 %2: m1 1+0
    SBUTTERFLY   wd, %3, %4, %5     ; %3: m0 3+2 %4: m1 3+2
    HADAMARD2_2D %1, %3, %2, %4, %5, dq
    SBUTTERFLY  qdq, %1, %2, %5
    HADAMARD  0, %6, %1, %2, %5     ; 2nd H m1/m0 row 0+1
    SBUTTERFLY  qdq, %3, %4, %5
    HADAMARD  0, %6, %3, %4, %5     ; 2nd H m1/m0 row 2+3
%endmacro
%macro HADAMARD8_2D 9-10 sumsub
    HADAMARD2_2D %1, %2, %3, %4, %9, wd
    HADAMARD2_2D %5, %6, %7, %8, %9, wd
    HADAMARD2_2D %1, %3, %2, %4, %9, dq
    HADAMARD2_2D %5, %7, %6, %8, %9, dq
    HADAMARD2_2D %1, %5, %3, %7, %9, qdq, %10
    HADAMARD2_2D %2, %6, %4, %8, %9, qdq, %10
%ifnidn %10, amax
    SWAP %2, %5
    SWAP %4, %7
%endif
%endmacro
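; Usage sketch (hypothetical registers): 8x8 Hadamard of m0..m7 with m8
; as scratch (x86-64 register count), using amax for SATD accumulation:
;     HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 8, amax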
%macro SUMSUBD2_AB 4
    mova    %4, %1
    mova    %3, %2
    psraw   %2, 1  ; %2: %2>>1
    psraw   %1, 1  ; %1: %1>>1
    paddw   %2, %4 ; %2: %2>>1+%1
    psubw   %1, %3 ; %1: %1>>1-%2
%endmacro
%macro DCT4_1D 5
%ifnum %5
    SUMSUB_BADC m%4, m%1, m%3, m%2; m%5
    SUMSUB_BA   m%3, m%4, m%5
    SUMSUB2_AB  m%1, m%2, m%5
    SWAP %1, %3, %4, %5, %2
%else
    SUMSUB_BADC m%4, m%1, m%3, m%2
    SUMSUB_BA   m%3, m%4
    mova  [%5], m%2
    SUMSUB2_AB  m%1, [%5], m%2
    SWAP %1, %3
    SWAP %2, %4
%endif
%endmacro
%macro IDCT4_1D 5-6
%if %0==6
    SUMSUBD2_AB m%2, m%4, m%6, m%5
    ; %2: %2>>1-%4 %4: %2+%4>>1
    SUMSUB_BA   m%3, m%1, m%6
    ; %3: %1+%3 %1: %1-%3
    SUMSUB_BADC m%4, m%3, m%2, m%1, m%6
    ; %4: %1+%3 + (%2+%4>>1)
    ; %3: %1+%3 - (%2+%4>>1)
    ; %2: %1-%3 + (%2>>1-%4)
    ; %1: %1-%3 - (%2>>1-%4)
%else
    SUMSUBD2_AB m%2, m%4, [%5], [%5+16]
    SUMSUB_BA   m%3, m%1
    SUMSUB_BADC m%4, m%3, m%2, m%1
%endif
    SWAP %1, %4, %3
    ; %1: %1+%3 + (%2+%4>>1) row0
    ; %2: %1-%3 + (%2>>1-%4) row1
    ; %3: %1-%3 - (%2>>1-%4) row2
    ; %4: %1+%3 - (%2+%4>>1) row3
%endmacro
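; Usage sketch (hypothetical registers): one 1-D pass over four rows in
; m0..m3; DCT4_1D takes one spare register, IDCT4_1D takes two:
;     DCT4_1D  0, 1, 2, 3, 4
;     IDCT4_1D 0, 1, 2, 3, 4, 5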
%macro LOAD_DIFF 5
%ifdef X264_HIGH_BIT_DEPTH
    mova       m%1, %4
    psubw      m%1, %5
%elifidn %3, none
    movh       m%1, %4
    movh       m%2, %5
    punpcklbw  m%1, m%2
    punpcklbw  m%2, m%2
    psubw      m%1, m%2
%else
    movh       m%1, %4
    punpcklbw  m%1, m%3
    movh       m%2, %5
    punpcklbw  m%2, m%3
    psubw      m%1, m%2
%endif
%endmacro
%macro LOAD_DIFF8x4_SSE2 8
    LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDE], [%8+%1*FDEC_STRIDE]
    LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDE], [%8+%2*FDEC_STRIDE]
    LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDE], [%8+%3*FDEC_STRIDE]
    LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDE], [%8+%4*FDEC_STRIDE]
%endmacro
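; Usage sketch (hypothetical registers): load rows 0-3 of an 8x4 block as
; fenc-fdec word differences into m0..m3, with m4 as scratch, m5 zeroed
; beforehand, and r0/r2 holding the fenc/fdec pointers:
;     LOAD_DIFF8x4_SSE2 0, 1, 2, 3, 4, 5, r0, r2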
%macro LOAD_DIFF8x4_SSSE3 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
    movh       m%2, [%8+%1*FDEC_STRIDE]
    movh       m%1, [%7+%1*FENC_STRIDE]
    punpcklbw  m%1, m%2
    movh       m%3, [%8+%2*FDEC_STRIDE]
    movh       m%2, [%7+%2*FENC_STRIDE]
    punpcklbw  m%2, m%3
    movh       m%4, [%8+%3*FDEC_STRIDE]
    movh       m%3, [%7+%3*FENC_STRIDE]
    punpcklbw  m%3, m%4
    movh       m%5, [%8+%4*FDEC_STRIDE]
    movh       m%4, [%7+%4*FENC_STRIDE]
    punpcklbw  m%4, m%5
    pmaddubsw  m%1, m%6
    pmaddubsw  m%2, m%6
    pmaddubsw  m%3, m%6
    pmaddubsw  m%4, m%6
%endmacro
%macro STORE_DCT 6
    movq   [%5+%6+ 0], m%1
    movq   [%5+%6+ 8], m%2
    movq   [%5+%6+16], m%3
    movq   [%5+%6+24], m%4
    movhps [%5+%6+32], m%1
    movhps [%5+%6+40], m%2
    movhps [%5+%6+48], m%3
    movhps [%5+%6+56], m%4
%endmacro
%macro STORE_IDCT 4
    movhps [r0-4*FDEC_STRIDE], %1
    movh   [r0-3*FDEC_STRIDE], %1
    movhps [r0-2*FDEC_STRIDE], %2
    movh   [r0-1*FDEC_STRIDE], %2
    movhps [r0+0*FDEC_STRIDE], %3
    movh   [r0+1*FDEC_STRIDE], %3
    movhps [r0+2*FDEC_STRIDE], %4
    movh   [r0+3*FDEC_STRIDE], %4
%endmacro
%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
    LOAD_DIFF m%1, m%5, m%7, [%8],      [%9]
    LOAD_DIFF m%2, m%6, m%7, [%8+r1],   [%9+r3]
    LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3]
    LOAD_DIFF m%4, m%6, m%7, [%8+r4],   [%9+r5]
%if %10
    lea %8, [%8+4*r1]
    lea %9, [%9+4*r3]
%endif
%endmacro
%ifdef X264_HIGH_BIT_DEPTH
%macro CLIPW 3 ;(dst, min, max)
    pmaxsw %1, %2
    pminsw %1, %3
%endmacro
%macro FIX_STRIDES 1-*
%ifdef X264_HIGH_BIT_DEPTH
%rep %0
    add %1, %1
    %rotate 1
%endrep
%endif
%endmacro
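; Usage sketch: double the pixel strides in r1/r3 for 16-bit pixel builds
; (expands to nothing for 8-bit):
;     FIX_STRIDES r1, r3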
%macro SPLATW 2-3 0
%if mmsize==16
    pshuflw    %1, %2, %3*0x55
    punpcklqdq %1, %1
%else
    pshufw     %1, %2, %3*0x55
%endif
%endmacro

%macro SPLATD 2-3 0
%if mmsize==16
    pshufd %1, %2, %3*0x55
%else
    pshufw %1, %2, %3*0x11 + (%3+1)*0x44
%endif
%endmacro
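; Usage sketch: broadcast word 0 of m1 across m0 (word %3 if given):
;     SPLATW m0, m1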