;*****************************************************************************
;* x86util.asm: x86 utility macros
;*****************************************************************************
-;* Copyright (C) 2008-2011 x264 project
+;* Copyright (C) 2008-2015 x264 project
;*
;* Authors: Holger Lubitz <holger@lubitz.org>
;* Loren Merritt <lorenm@u.washington.edu>
%assign SIZEOF_PIXEL 1
%assign SIZEOF_DCTCOEF 2
%define pixel byte
-%ifdef HIGH_BIT_DEPTH
+%define vpbroadcastdct vpbroadcastw
+%define vpbroadcastpix vpbroadcastb
+%if HIGH_BIT_DEPTH
%assign SIZEOF_PIXEL 2
%assign SIZEOF_DCTCOEF 4
%define pixel word
+ %define vpbroadcastdct vpbroadcastd
+ %define vpbroadcastpix vpbroadcastw
%endif
%assign FENC_STRIDEB SIZEOF_PIXEL*FENC_STRIDE
%assign PIXEL_MAX ((1 << BIT_DEPTH)-1)
%macro FIX_STRIDES 1-*
-%ifdef HIGH_BIT_DEPTH
+%if HIGH_BIT_DEPTH
%rep %0
add %1, %1
%rotate 1
%macro SBUTTERFLY 4
-%if avx_enabled && mmsize == 16
+%ifidn %1, dqqq
+ vperm2i128 m%4, m%2, m%3, q0301 ; punpckh
+ vinserti128 m%2, m%2, xm%3, 1 ; punpckl
+%elif avx_enabled && mmsize >= 16
punpckh%1 m%4, m%2, m%3
punpckl%1 m%2, m%3
%else
%endmacro
%macro TRANSPOSE8x8W 9-11
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
SBUTTERFLY wd, %1, %2, %9
SBUTTERFLY wd, %3, %4, %9
SBUTTERFLY wd, %5, %6, %9
%endif
%endmacro
+%macro WIDEN_SXWD 2
+ punpckhwd m%2, m%1
+ psrad m%2, 16
+%if cpuflag(sse4)
+ pmovsxwd m%1, m%1
+%else
+ punpcklwd m%1, m%1
+ psrad m%1, 16
+%endif
+%endmacro
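+; e.g. WIDEN_SXWD 0, 1 sign-extends the eight words of m0 into dwords:
+; m0 receives the low four, m1 the high four (m1's previous contents are
+; shifted out and discarded).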
+
%macro ABSW 2-3 ; dst, src, tmp (tmp used only if dst==src)
%if cpuflag(ssse3)
pabsw %1, %2
%endif
%endmacro
-%macro ABSD 2
+%macro ABSD 2-3
%if cpuflag(ssse3)
pabsd %1, %2
%else
- pxor %1, %1
- pcmpgtd %1, %2
- pxor %2, %1
- psubd %2, %1
- SWAP %1, %2
+ %define %%s %2
+%if %0 == 3
+ mova %3, %2
+ %define %%s %3
+%endif
+ pxor %1, %1
+ pcmpgtd %1, %%s
+ pxor %%s, %1
+ psubd %%s, %1
+ SWAP %1, %%s
%endif
%endmacro
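; The pre-SSSE3 ABSD path computes |x| as (x ^ m) - m with m = (0 > x) built
; by pcmpgtd; when a third (scratch) register is supplied, %2 is left intact,
; otherwise it is clobbered by the SWAP.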
%endmacro
%imacro SPLATW 2-3 0
- PSHUFLW %1, %2, (%3)*q1111
+%if cpuflag(avx2) && %3 == 0
+ vpbroadcastw %1, %2
+%else
+ PSHUFLW %1, %2, (%3)*q1111
%if mmsize == 16
- punpcklqdq %1, %1
+ punpcklqdq %1, %1
+%endif
%endif
%endmacro
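; e.g. SPLATW m0, m1, 3 broadcasts word 3 of m1's low half to every word of
; m0 (PSHUFLW with q3333, then punpcklqdq on xmm); with AVX2 and the default
; word index 0, a single vpbroadcastw does the whole job.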
pminsw %1, %3
%endmacro
+%macro MOVHL 2 ; dst, src
+%ifidn %1, %2
+ punpckhqdq %1, %2
+%elif cpuflag(avx)
+ punpckhqdq %1, %2, %2
+%elif cpuflag(sse4)
+ pshufd %1, %2, q3232 ; pshufd is slow on some older CPUs, so only use it on more modern ones
+%else
+ movhlps %1, %2 ; may cause an int/float domain transition and has a dependency on dst
+%endif
+%endmacro
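+; e.g. MOVHL m1, m0 leaves the high qword of m0 in the low qword of m1;
+; the upper half of the destination differs between the variants, so don't
+; rely on it.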
+
%macro HADDD 2 ; sum junk
-%if mmsize == 16
- movhlps %2, %1
+%if sizeof%1 == 32
+%define %2 xmm%2
+ vextracti128 %2, %1, 1
+%define %1 xmm%1
+ paddd %1, %2
+%endif
+%if mmsize >= 16
+ MOVHL %2, %1
paddd %1, %2
%endif
+%if cpuflag(xop) && sizeof%1 == 16
+ vphadddq %1, %1
+%else
PSHUFLW %2, %1, q0032
paddd %1, %2
+%endif
+%undef %1
+%undef %2
%endmacro
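; After HADDD the sum of all dwords of %1 is in its low dword and %2 is
; clobbered as scratch; a ymm input is first folded into its xmm half with
; vextracti128 (the %define/%undef pairs rename the operands to their xmm
; aliases for the rest of the macro).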
-%macro HADDW 2
- pmaddwd %1, [pw_1]
- HADDD %1, %2
+%macro HADDW 2 ; reg, tmp
+%if cpuflag(xop) && sizeof%1 == 16
+ vphaddwq %1, %1
+ MOVHL %2, %1
+ paddd %1, %2
+%else
+ pmaddwd %1, [pw_1]
+ HADDD %1, %2
+%endif
%endmacro
-%macro HADDUW 2
+%macro HADDUWD 2
+%if cpuflag(xop) && sizeof%1 == 16
+ vphadduwd %1, %1
+%else
psrld %2, %1, 16
pslld %1, 16
psrld %1, 16
paddd %1, %2
- HADDD %1, %2
+%endif
+%endmacro
+
+%macro HADDUW 2
+%if cpuflag(xop) && sizeof%1 == 16
+ vphadduwq %1, %1
+ MOVHL %2, %1
+ paddd %1, %2
+%else
+ HADDUWD %1, %2
+ HADDD %1, %2
+%endif
%endmacro
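; HADDW/HADDUW likewise reduce the (un)signed words of %1 to a single sum in
; its low dword, either via the dedicated XOP horizontal adds or by widening
; to dwords (pmaddwd against pw_1 for HADDW, or the even/odd word split in
; HADDUWD) and then reusing HADDD.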
%macro PALIGNR 4-5 ; [dst,] src1, src2, imm, tmp
-%if cpuflag(ssse3)
+; AVX2 version uses a precalculated extra input that
+; can be re-used across calls
+%if sizeof%1==32
+ ; %3 = abcdefgh ijklmnop (lower address)
+ ; %2 = ABCDEFGH IJKLMNOP (higher address)
+; vperm2i128 %5, %2, %3, q0003 ; %5 = ijklmnop ABCDEFGH
+%if %4 < 16
+ palignr %1, %5, %3, %4 ; %1 = bcdefghi jklmnopA
+%else
+ palignr %1, %2, %5, %4-16 ; %1 = pABCDEFG HIJKLMNO
+%endif
+%elif cpuflag(ssse3)
%if %0==5
palignr %1, %2, %3, %4
%else
%endif
%endmacro
+; shift a mmxreg by n bytes, or an xmmreg by 2*n bytes
+; values shifted in are undefined
+; faster if dst==src
+%define PSLLPIX PSXLPIX l, -1, ;dst, src, shift
+%define PSRLPIX PSXLPIX r, 1, ;dst, src, shift
+%macro PSXLPIX 5
+    %if mmsize == 8
+        %if %5&1
+            ps%1lq %3, %4, %5*8
+        %else
+            pshufw %3, %4, (q3210<<8>>(8+%2*%5))&0xff
+        %endif
+    %else
+        ps%1ldq %3, %4, %5*2
+    %endif
+%endmacro
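+; e.g. with mmsize == 16, PSRLPIX m0, m1, 2 becomes psrldq m0, m1, 4 (shift
+; right by two words); on MMX, odd shifts fall back to psrlq/psllq and even
+; shifts use a pshufw whose immediate is derived from q3210.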
%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
%ifnum %5
%endif
%elifidn %1, q
shufps m%5, m%3, m%4, q3131
- shufps m%3, m%4, q2020
+ shufps m%3, m%3, m%4, q2020
SWAP %4, %5
%endif
%endmacro
+%macro TRANS_XOP 5-6
+%ifidn %1, d
+ vpperm m%5, m%3, m%4, [transd_shuf1]
+ vpperm m%3, m%3, m%4, [transd_shuf2]
+%elifidn %1, q
+ shufps m%5, m%3, m%4, q3131
+ shufps m%3, m%4, q2020
+%endif
+ SWAP %4, %5
+%endmacro
+
%macro HADAMARD 5-6
; %1=distance in words (0 for vertical pass, 1/2/4/8 for horizontal passes)
; %2=sumsub/max/amax (sum and diff / maximum / maximum of absolutes)
; %5(%6): tmpregs
%if %1!=0 ; have to reorder stuff for horizontal op
    %ifidn %2, sumsub
- %define ORDER ord
- ; sumsub needs order because a-b != b-a unless a=b
+        %define ORDER ord
+        ; sumsub needs order because a-b != b-a unless a=b
    %else
- %define ORDER unord
- ; if we just max, order doesn't matter (allows pblendw+or in sse4)
+        %define ORDER unord
+        ; if we just max, order doesn't matter (allows pblendw+or in sse4)
    %endif
    %if %1==1
- TRANS d, ORDER, %3, %4, %5, %6
+        TRANS d, ORDER, %3, %4, %5, %6
    %elif %1==2
- %if mmsize==8
- SBUTTERFLY dq, %3, %4, %5
- %else
- TRANS q, ORDER, %3, %4, %5, %6
- %endif
+        %if mmsize==8
+            SBUTTERFLY dq, %3, %4, %5
+        %else
+            TRANS q, ORDER, %3, %4, %5, %6
+        %endif
    %elif %1==4
- SBUTTERFLY qdq, %3, %4, %5
+        SBUTTERFLY qdq, %3, %4, %5
+    %elif %1==8
+        SBUTTERFLY dqqq, %3, %4, %5
    %endif
%endif
%ifidn %2, sumsub
%endif
%endmacro
+; doesn't include the "pmaddubsw hmul_8p" pass
+%macro HADAMARD8_2D_HMUL 10
+ HADAMARD4_V %1, %2, %3, %4, %9
+ HADAMARD4_V %5, %6, %7, %8, %9
+ SUMSUB_BADC w, %1, %5, %2, %6, %9
+ HADAMARD 2, sumsub, %1, %5, %9, %10
+ HADAMARD 2, sumsub, %2, %6, %9, %10
+ SUMSUB_BADC w, %3, %7, %4, %8, %9
+ HADAMARD 2, sumsub, %3, %7, %9, %10
+ HADAMARD 2, sumsub, %4, %8, %9, %10
+ HADAMARD 1, amax, %1, %5, %9, %10
+ HADAMARD 1, amax, %2, %6, %9, %5
+ HADAMARD 1, amax, %3, %7, %9, %5
+ HADAMARD 1, amax, %4, %8, %9, %5
+%endmacro
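+; i.e. HADAMARD8_2D_HMUL runs HADAMARD4_V on each group of four rows, the
+; sum/diff stage between the two groups (SUMSUB_BADC), then the distance-2
+; and distance-1 horizontal passes, with the final pass using amax so the
+; result is ready for a satd/sa8d-style sum of absolute coefficients.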
+
%macro SUMSUB2_AB 4
-%ifnum %3
+%if cpuflag(xop)
+ pmacs%1%1 m%4, m%3, [p%1_m2], m%2
+ pmacs%1%1 m%2, m%2, [p%1_2], m%3
+%elifnum %3
psub%1 m%4, m%2, m%3
psub%1 m%4, m%3
padd%1 m%2, m%2
%endif
%endmacro
-%macro SUMSUB2_BA 4
-%if avx_enabled
- padd%1 m%4, m%2, m%3
- padd%1 m%4, m%3
- psub%1 m%3, m%2
- psub%1 m%3, m%2
- SWAP %2, %4
-%else
- mova m%4, m%2
- padd%1 m%2, m%3
- padd%1 m%2, m%3
- psub%1 m%3, m%4
- psub%1 m%3, m%4
-%endif
-%endmacro
-
%macro SUMSUBD2_AB 5
%ifnum %4
psra%1 m%5, m%2, 1 ; %3: %3>>1
%endmacro
-%macro LOAD_DIFF 5
-%ifdef HIGH_BIT_DEPTH
+%macro LOAD_DIFF 5-6 1
+%if HIGH_BIT_DEPTH
+%if %6 ; %5 aligned?
mova %1, %4
psubw %1, %5
-%elifidn %3, none
+%else
+ movu %1, %4
+ movu %2, %5
+ psubw %1, %2
+%endif
+%else ; !HIGH_BIT_DEPTH
+%ifidn %3, none
movh %1, %4
movh %2, %5
punpcklbw %1, %2
punpcklbw %2, %3
psubw %1, %2
%endif
+%endif ; HIGH_BIT_DEPTH
%endmacro
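; LOAD_DIFF's trailing argument (default 1) only matters for HIGH_BIT_DEPTH:
; it says whether the memory operands are aligned, selecting mova plus a
; memory psubw versus two movu loads.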
%macro LOAD_DIFF8x4 8 ; 4x dst, 1x tmp, 1x mul, 2x ptr
-%if cpuflag(ssse3)
+%if BIT_DEPTH == 8 && cpuflag(ssse3)
movh m%2, [%8+%1*FDEC_STRIDE]
movh m%1, [%7+%1*FENC_STRIDE]
punpcklbw m%1, m%2
pmaddubsw m%3, m%6
pmaddubsw m%4, m%6
%else
- LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDE], [%8+%1*FDEC_STRIDE]
- LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDE], [%8+%2*FDEC_STRIDE]
- LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDE], [%8+%3*FDEC_STRIDE]
- LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDE], [%8+%4*FDEC_STRIDE]
+ LOAD_DIFF m%1, m%5, m%6, [%7+%1*FENC_STRIDEB], [%8+%1*FDEC_STRIDEB]
+ LOAD_DIFF m%2, m%5, m%6, [%7+%2*FENC_STRIDEB], [%8+%2*FDEC_STRIDEB]
+ LOAD_DIFF m%3, m%5, m%6, [%7+%3*FENC_STRIDEB], [%8+%3*FDEC_STRIDEB]
+ LOAD_DIFF m%4, m%5, m%6, [%7+%4*FENC_STRIDEB], [%8+%4*FDEC_STRIDEB]
%endif
%endmacro
movh [r0+3*FDEC_STRIDE], %4
%endmacro
-%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
- LOAD_DIFF m%1, m%5, m%7, [%8], [%9]
- LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3]
- LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3]
- LOAD_DIFF m%4, m%6, m%7, [%8+r4], [%9+r5]
+%macro LOAD_DIFF_8x4P 7-11 r0,r2,0,1 ; 4x dest, 2x temp, 2x pointer, increment, aligned?
+ LOAD_DIFF m%1, m%5, m%7, [%8], [%9], %11
+ LOAD_DIFF m%2, m%6, m%7, [%8+r1], [%9+r3], %11
+ LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3], %11
+ LOAD_DIFF m%4, m%6, m%7, [%8+r4], [%9+r5], %11
%if %10
lea %8, [%8+4*r1]
lea %9, [%9+4*r3]
%endif
%endmacro
+; 2xdst, 2xtmp, 2xsrcrow
+%macro LOAD_DIFF16x2_AVX2 6
+ pmovzxbw m%1, [r1+%5*FENC_STRIDE]
+ pmovzxbw m%2, [r1+%6*FENC_STRIDE]
+ pmovzxbw m%3, [r2+(%5-4)*FDEC_STRIDE]
+ pmovzxbw m%4, [r2+(%6-4)*FDEC_STRIDE]
+ psubw m%1, m%3
+ psubw m%2, m%4
+%endmacro
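+; LOAD_DIFF16x2_AVX2 loads rows %5 and %6 of the fenc block at r1 and the
+; matching fdec rows (addressed relative to r2 with a -4 row bias) as
+; zero-extended words and leaves the fenc-fdec differences in m%1/m%2;
+; m%3/m%4 are scratch.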
+
%macro DIFFx2 6-7
movh %3, %5
punpcklbw %3, %4
packuswb %2, %1
%endmacro
-%macro STORE_DIFF 4
+; (high depth) in: %1, %2, min to clip, max to clip, mem128
+; (low depth) in: %1, tmp, %3, mem64
+%macro STORE_DIFF 4-5
+%if HIGH_BIT_DEPTH
+ psrad %1, 6
+ psrad %2, 6
+ packssdw %1, %2
+ paddw %1, %5
+ CLIPW %1, %3, %4
+ mova %5, %1
+%else
movh %2, %4
punpcklbw %2, %3
psraw %1, 6
paddsw %1, %2
packuswb %1, %1
movh %4, %1
+%endif
+%endmacro
+
+%macro SHUFFLE_MASK_W 8
+    %rep 8
+        %if %1>=0x80
+            db %1, %1
+        %else
+            db %1*2
+            db %1*2+1
+        %endif
+        %rotate 1
+    %endrep
+%endmacro
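+; e.g. SHUFFLE_MASK_W 3, 2, 1, 0, 7, 6, 5, 4 emits the 16-byte pshufb mask
+; that reverses the words within each qword; an argument >= 0x80 is emitted
+; for both bytes of its word, so pshufb zeroes that word.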
+
+; instruction, accum, input, iteration (zero to swap, nonzero to add)
+%macro ACCUM 4
+%if %4
+ %1 m%2, m%3
+%else
+ SWAP %2, %3
+%endif
%endmacro
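; e.g. in an unrolled accumulation loop, ACCUM paddd, 0, 1, x expands to
; "paddd m0, m1" when x is nonzero, and to a register-renaming SWAP on the
; first iteration, avoiding an explicit zeroing of the accumulator.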