;*****************************************************************************
;* x86util.asm
;*****************************************************************************
;* Copyright (C) 2008-2010 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%define private_prefix ff
%define public_prefix  avpriv
%define cpuflags_mmxext cpuflags_mmx2

%include "libavutil/x86/x86inc.asm"

; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
    [base], [base + stride], [base + 2*stride], [base3], \
    [base3 + stride], [base3 + 2*stride], [base3 + stride3], [base3 + stride*4]
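
; A worked expansion (illustrative, not from the original source): with
; base3 = base + 3*stride and stride3 = 3*stride, the eight operands cover
; rows 0..7. E.g. PASS8ROWS(r0, r4, r1, r3), where r4 = r0+3*r1 and
; r3 = 3*r1, expands to
;     [r0], [r0+r1], [r0+2*r1], [r4], [r4+r1], [r4+2*r1], [r4+r3], [r4+4*r1]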

; Interleave low src0 with low src1 and store in src0,
; interleave high src0 with high src1 and store in src1.
; %1 - types of the registers to interleave
; %2 - index of the register with src0
; %3 - index of the register with src1
; %4 - index of the register for intermediate results
; example for %1 - wd: input:  src0: x0 x1 x2 x3 z0 z1 z2 z3
;                              src1: y0 y1 y2 y3 q0 q1 q2 q3
;                      output: src0: x0 y0 x1 y1 x2 y2 x3 y3
;                              src1: z0 q0 z1 q1 z2 q2 z3 q3
%macro SBUTTERFLY 4
%ifidn %1, dqqq
    vperm2i128  m%4, m%2, m%3, q0301 ; punpckh
    vinserti128 m%2, m%2, xm%3, 1    ; punpckl
%elif avx_enabled == 0
    mova      m%4, m%2
    punpckl%1 m%2, m%3
    punpckh%1 m%4, m%3
%else
    punpckh%1 m%4, m%2, m%3
    punpckl%1 m%2, m%2, m%3
%endif
    SWAP %3, %4
%endmacro

%macro SBUTTERFLY2 4
    punpckl%1 m%4, m%2, m%3
    punpckh%1 m%2, m%2, m%3
    SWAP %2, %4, %3
%endmacro
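
; A minimal usage sketch (illustrative, not from the original source):
; interleave the words of m0 and m1, with m2 as scratch:
;     SBUTTERFLY wd, 0, 1, 2
; Afterwards m0 holds the low-half interleave and m1 the high-half
; interleave, matching the wd example above.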

%macro SBUTTERFLYPS 3
    unpcklps m%3, m%1, m%2
    unpckhps m%1, m%1, m%2
    SWAP %1, %3, %2
%endmacro

%macro SBUTTERFLYPD 3
    movlhps m%3, m%1, m%2
    movhlps m%2, m%2, m%1
    SWAP %1, %3
%endmacro

%macro TRANSPOSE4x4B 5
    SBUTTERFLY bw, %1, %2, %5
    SBUTTERFLY bw, %3, %4, %5
    SBUTTERFLY wd, %1, %3, %5
    SBUTTERFLY wd, %2, %4, %5
    SWAP %2, %3
%endmacro

%macro TRANSPOSE4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SWAP %2, %3
%endmacro
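
; A minimal usage sketch (illustrative, not from the original source):
; transpose a 4x4 block of words held in the low halves of m0..m3, using
; m4 as scratch:
;     TRANSPOSE4x4W 0, 1, 2, 3, 4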

%macro TRANSPOSE2x4x4B 5
    SBUTTERFLY bw, %1, %2, %5
    SBUTTERFLY bw, %3, %4, %5
    SBUTTERFLY wd, %1, %3, %5
    SBUTTERFLY wd, %2, %4, %5
    SBUTTERFLY dq, %1, %2, %5
    SBUTTERFLY dq, %3, %4, %5
%endmacro

%macro TRANSPOSE2x4x4W 5
    SBUTTERFLY wd,  %1, %2, %5
    SBUTTERFLY wd,  %3, %4, %5
    SBUTTERFLY dq,  %1, %3, %5
    SBUTTERFLY dq,  %2, %4, %5
    SBUTTERFLY qdq, %1, %2, %5
    SBUTTERFLY qdq, %3, %4, %5
%endmacro

%macro TRANSPOSE4x4D 5
    SBUTTERFLY dq,  %1, %2, %5
    SBUTTERFLY dq,  %3, %4, %5
    SBUTTERFLY qdq, %1, %3, %5
    SBUTTERFLY qdq, %2, %4, %5
    SWAP %2, %3
%endmacro

; identical behavior to TRANSPOSE4x4D, but using SSE1 float ops
%macro TRANSPOSE4x4PS 5
    SBUTTERFLYPS %1, %2, %5
    SBUTTERFLYPS %3, %4, %5
    SBUTTERFLYPD %1, %3, %5
    SBUTTERFLYPD %2, %4, %5
    SWAP %2, %3
%endmacro
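
; A minimal usage sketch (illustrative, not from the original source):
; transpose a 4x4 float matrix held in m0..m3, with m4 as scratch:
;     TRANSPOSE4x4PS 0, 1, 2, 3, 4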

%macro TRANSPOSE8x4D 9-11
%if ARCH_X86_64
    SBUTTERFLY dq,  %1, %2, %9
    SBUTTERFLY dq,  %3, %4, %9
    SBUTTERFLY dq,  %5, %6, %9
    SBUTTERFLY dq,  %7, %8, %9
    SBUTTERFLY qdq, %1, %3, %9
    SBUTTERFLY qdq, %2, %4, %9
    SBUTTERFLY qdq, %5, %7, %9
    SBUTTERFLY qdq, %6, %8, %9
    SWAP %2, %5
    SWAP %4, %7
%else
; in:  m0..m7
; out: m0..m7, unless %11 in which case m2 is in %9
; spills into %9 and %10
    movdqa %9, m%7
    SBUTTERFLY dq,  %1, %2, %7
    movdqa %10, m%2
    movdqa m%7, %9
    SBUTTERFLY dq,  %3, %4, %2
    SBUTTERFLY dq,  %5, %6, %2
    SBUTTERFLY dq,  %7, %8, %2
    SBUTTERFLY qdq, %1, %3, %2
    movdqa %9, m%3
    movdqa m%2, %10
    SBUTTERFLY qdq, %2, %4, %3
    SBUTTERFLY qdq, %5, %7, %3
    SBUTTERFLY qdq, %6, %8, %3
    SWAP %2, %5
    SWAP %4, %7
%if %0<11
    movdqa m%3, %9
%endif
%endif
%endmacro

%macro TRANSPOSE8x8W 9-11
%if ARCH_X86_64
    SBUTTERFLY wd,  %1, %2, %9
    SBUTTERFLY wd,  %3, %4, %9
    SBUTTERFLY wd,  %5, %6, %9
    SBUTTERFLY wd,  %7, %8, %9
    SBUTTERFLY dq,  %1, %3, %9
    SBUTTERFLY dq,  %2, %4, %9
    SBUTTERFLY dq,  %5, %7, %9
    SBUTTERFLY dq,  %6, %8, %9
    SBUTTERFLY qdq, %1, %5, %9
    SBUTTERFLY qdq, %2, %6, %9
    SBUTTERFLY qdq, %3, %7, %9
    SBUTTERFLY qdq, %4, %8, %9
    SWAP %2, %5
    SWAP %4, %7
%else
; in:  m0..m7, unless %11 in which case m6 is in %9
; out: m0..m7, unless %11 in which case m4 is in %10
; spills into %9 and %10
%if %0<11
    movdqa %9, m%7
%endif
    SBUTTERFLY wd,  %1, %2, %7
    movdqa %10, m%2
    movdqa m%7, %9
    SBUTTERFLY wd,  %3, %4, %2
    SBUTTERFLY wd,  %5, %6, %2
    SBUTTERFLY wd,  %7, %8, %2
    SBUTTERFLY dq,  %1, %3, %2
    movdqa %9, m%3
    movdqa m%2, %10
    SBUTTERFLY dq,  %2, %4, %3
    SBUTTERFLY dq,  %5, %7, %3
    SBUTTERFLY dq,  %6, %8, %3
    SBUTTERFLY qdq, %1, %5, %3
    SBUTTERFLY qdq, %2, %6, %3
    movdqa %10, m%2
    movdqa m%3, %9
    SBUTTERFLY qdq, %3, %7, %2
    SBUTTERFLY qdq, %4, %8, %2
    SWAP %2, %5
    SWAP %4, %7
%if %0<11
    movdqa m%5, %10
%endif
%endif
%endmacro
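
; A minimal usage sketch (illustrative, not from the original source):
; transpose eight word rows held in m0..m7. On x86-64 the ninth argument
; names a scratch register:
;     TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
; On x86-32 the extra arguments are spill slots instead, e.g. two 16-byte
; temporaries on the stack:
;     TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [esp], [esp+16]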

%macro TRANSPOSE16x16W 18-19
; in:  m0..m15, unless %19 in which case m6 is in %17
; out: m0..m15, unless %19 in which case m4 is in %18
; spills into %17 and %18
%if %0 < 19
    mova       %17, m%7
%endif

    SBUTTERFLY dqqq, %1,  %9, %7
    SBUTTERFLY dqqq, %2, %10, %7
    SBUTTERFLY dqqq, %3, %11, %7
    SBUTTERFLY dqqq, %4, %12, %7
    SBUTTERFLY dqqq, %5, %13, %7
    SBUTTERFLY dqqq, %6, %14, %7
    mova       %18, m%14
    mova       m%7, %17
    SBUTTERFLY dqqq, %7, %15, %14
    SBUTTERFLY dqqq, %8, %16, %14

    SBUTTERFLY   wd,  %1,  %2, %14
    SBUTTERFLY   wd,  %3,  %4, %14
    SBUTTERFLY   wd,  %5,  %6, %14
    SBUTTERFLY   wd,  %7,  %8, %14
    SBUTTERFLY   wd,  %9, %10, %14
    SBUTTERFLY   wd, %11, %12, %14
    mova       %17, m%12
    mova      m%14, %18
    SBUTTERFLY   wd, %13, %14, %12
    SBUTTERFLY   wd, %15, %16, %12

    SBUTTERFLY   dq,  %1,  %3, %12
    SBUTTERFLY   dq,  %2,  %4, %12
    SBUTTERFLY   dq,  %5,  %7, %12
    SBUTTERFLY   dq,  %6,  %8, %12
    SBUTTERFLY   dq,  %9, %11, %12
    mova       %18, m%11
    mova      m%12, %17
    SBUTTERFLY   dq, %10, %12, %11
    SBUTTERFLY   dq, %13, %15, %11
    SBUTTERFLY   dq, %14, %16, %11

    SBUTTERFLY  qdq,  %1,  %5, %11
    SBUTTERFLY  qdq,  %2,  %6, %11
    SBUTTERFLY  qdq,  %3,  %7, %11
    SBUTTERFLY  qdq,  %4,  %8, %11
    SBUTTERFLY  qdq,  %9, %13, %11
    SBUTTERFLY  qdq, %10, %14, %11
    mova      m%11, %18
    mova       %18, m%5
    SBUTTERFLY  qdq, %11, %15, %5
    SBUTTERFLY  qdq, %12, %16, %5

%if %0 < 19
    mova       m%5, %18
%endif
%endmacro

%macro TRANSPOSE_8X8B 8
%if mmsize == 8
    %error "This macro does not support mmsize == 8"
%endif
    punpcklbw m%1, m%2
    punpcklbw m%3, m%4
    punpcklbw m%5, m%6
    punpcklbw m%7, m%8
    TRANSPOSE4x4W %1, %3, %5, %7, %2
    MOVHL m%2, m%1
    MOVHL m%4, m%3
    MOVHL m%6, m%5
    MOVHL m%8, m%7
%endmacro

; PABSW macro assumes %1 != %2, while ABS1/2 macros work in-place
%macro PABSW 2
%if cpuflag(ssse3)
    pabsw   %1, %2
%elif cpuflag(mmxext)
    pxor    %1, %1
    psubw   %1, %2
    pmaxsw  %1, %2
%else
    pxor    %1, %1
    pcmpgtw %1, %2
    pxor    %2, %1
    psubw   %2, %1
    SWAP    %1, %2
%endif
%endmacro

%macro ABS1 2
%if cpuflag(ssse3)
    pabsw   %1, %1
%elif cpuflag(mmxext) ; a, tmp
    pxor    %2, %2
    psubw   %2, %1
    pmaxsw  %1, %2
%else ; a, tmp
    pxor    %2, %2
    pcmpgtw %2, %1
    pxor    %1, %2
    psubw   %1, %2
%endif
%endmacro

%macro ABS2 4
%if cpuflag(ssse3)
    pabsw   %1, %1
    pabsw   %2, %2
%elif cpuflag(mmxext) ; a, b, tmp0, tmp1
    pxor    %3, %3
    pxor    %4, %4
    psubw   %3, %1
    psubw   %4, %2
    pmaxsw  %1, %3
    pmaxsw  %2, %4
%else ; a, b, tmp0, tmp1
    pxor    %3, %3
    pxor    %4, %4
    pcmpgtw %3, %1
    pcmpgtw %4, %2
    pxor    %1, %3
    pxor    %2, %4
    psubw   %1, %3
    psubw   %2, %4
%endif
%endmacro

%macro ABSB 2 ; source mmreg, temp mmreg (unused for SSSE3)
%if cpuflag(ssse3)
    pabsb   %1, %1
%else
    pxor    %2, %2
    psubb   %2, %1
    pminub  %1, %2
%endif
%endmacro
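
; A minimal usage sketch (illustrative, not from the original source):
; absolute value of the signed words in m0, in place, with m1 as scratch
; on pre-SSSE3 targets:
;     ABS1 m0, m1
; PABSW instead writes into a distinct destination, per the note above.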

%macro ABSB2 4 ; src1, src2, tmp1, tmp2 (tmp1/2 unused for SSSE3)
%if cpuflag(ssse3)
    pabsb   %1, %1
    pabsb   %2, %2
%else
    pxor    %3, %3
    pxor    %4, %4
    psubb   %3, %1
    psubb   %4, %2
    pminub  %1, %3
    pminub  %2, %4
%endif
%endmacro

%macro SPLATB_LOAD 3
%if cpuflag(ssse3)
    movd      %1, [%2-3]
    pshufb    %1, %3
%else
    movd      %1, [%2-3] ; to avoid crossing a cacheline
    punpcklbw %1, %1
    SPLATW    %1, %1, 3
%endif
%endmacro

%macro HADDD 2 ; sum, junk
%if sizeof%1 == 32
%define %2 xmm%2
    vextracti128 %2, %1, 1
%define %1 xmm%1
    paddd   %1, %2
%endif
%if mmsize >= 16
%if cpuflag(xop) && sizeof%1 == 16
    vphadddq %1, %1
%endif
    MOVHL   %2, %1
    paddd   %1, %2
%endif
%if notcpuflag(xop) || sizeof%1 != 16
    PSHUFLW %2, %1, q0032
    paddd   %1, %2
%endif
%endmacro
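
; A minimal usage sketch (illustrative, not from the original source):
; reduce the four dwords of m0 to a single sum in the low dword, with m1
; as scratch:
;     HADDD m0, m1
; followed by e.g. movd eax, xm0 to read the result back.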

%macro HADDW 2 ; reg, tmp
%if cpuflag(xop) && sizeof%1 == 16
    vphaddwq  %1, %1
    MOVHL     %2, %1
    paddd     %1, %2
%else
    pmaddwd   %1, [pw_1] ; pw_1 must be provided by the including file
    HADDD     %1, %2
%endif
%endmacro

%macro HADDPS 3 ; dst, src, tmp
%if cpuflag(sse3)
    haddps  %1, %1, %2
%else
    movaps  %3, %1
    shufps  %1, %2, q2020
    shufps  %3, %2, q3131
    addps   %1, %3
%endif
%endmacro

%macro PALIGNR 4-5 ; [dst,] src1, src2, imm, tmp
%if cpuflag(ssse3)
%if %0==5
    palignr %1, %2, %3, %4
%else
    palignr %1, %2, %3
%endif
%else ; [dst,] src1, src2, imm, tmp
    %define %%dst %1
%if %0==5
%ifnidn %1, %2
    mova    %%dst, %2
%endif
    %rotate 1
%endif
    mova    %4, %2
    psrlq   %4, %3*8
    psllq   %%dst, (8-%3)*8
    por     %%dst, %4
%endif
%endmacro

%macro PSWAPD 2
%if cpuflag(mmxext)
    pshufw    %1, %2, q1032
%elif cpuflag(3dnowext)
    pswapd    %1, %2
%elif cpuflag(3dnow)
    movq      %1, %2
    psrlq     %1, 32
    punpckldq %1, %2
%endif
%endmacro

%macro DEINTB 5 ; mask, reg1, mask, reg2, optional src to fill masks from
%ifnum %5
    pand   m%3, m%5, m%4 ; src .. y6 .. y4
    pand   m%1, m%5, m%2 ; dst .. y6 .. y4
%else
    mova   m%1, %5
    pand   m%3, m%1, m%4 ; src .. y6 .. y4
    pand   m%1, m%1, m%2 ; dst .. y6 .. y4
%endif
    psrlw  m%2, 8        ; dst .. y7 .. y5
    psrlw  m%4, 8        ; src .. y7 .. y5
%endmacro
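
; A minimal usage sketch (illustrative, not from the original source):
; split the bytes of m1 and m3 into even bytes (masked into m0 and m2) and
; odd bytes (shifted down in place), with a 0x00ff word mask in m4:
;     DEINTB 0, 1, 2, 3, 4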

%macro SUMSUB_BA 3-4
%if %0==3
    padd%1  m%2, m%3
    padd%1  m%3, m%3
    psub%1  m%3, m%2
%else
%if avx_enabled
    padd%1  m%4, m%2, m%3
    psub%1  m%3, m%2
    SWAP    %2, %4
%else
    mova    m%4, m%2
    padd%1  m%2, m%3
    psub%1  m%3, m%4
%endif
%endif
%endmacro

%macro SUMSUB_BADC 5-6
%if %0==6
    SUMSUB_BA %1, %2, %3, %6
    SUMSUB_BA %1, %4, %5, %6
%else
    SUMSUB_BA %1, %2, %3
    SUMSUB_BA %1, %4, %5
%endif
%endmacro

%macro SUMSUBD2_AB 5
%ifnum %4
    psra%1  m%5, m%2, 1  ; %3: %3>>1
    psra%1  m%4, m%3, 1  ; %2: %2>>1
    padd%1  m%4, m%2     ; %3: %3>>1+%2
    psub%1  m%5, m%3     ; %2: %2>>1-%3
    SWAP    %2, %5
    SWAP    %3, %4
%else
    mova    %5, m%2
    mova    %4, m%3
    psra%1  m%3, 1       ; %3: %3>>1
    psra%1  m%2, 1       ; %2: %2>>1
    padd%1  m%3, %5      ; %3: %3>>1+%2
    psub%1  m%2, %4      ; %2: %2>>1-%3
%endif
%endmacro

%macro DCT4_1D 5
%ifnum %5
    SUMSUB_BADC w, %4, %1, %3, %2, %5
    SUMSUB_BA   w, %3, %4, %5
    SUMSUB2_AB  w, %1, %2, %5
    SWAP %1, %3, %4, %5, %2
%else
    SUMSUB_BADC w, %4, %1, %3, %2
    SUMSUB_BA   w, %3, %4
    mova     [%5], m%2
    SUMSUB2_AB  w, %1, [%5], %2
    SWAP %1, %3
    SWAP %2, %4
%endif
%endmacro

%macro IDCT4_1D 6-7
%ifnum %6
    SUMSUBD2_AB %1, %3, %5, %7, %6
    ; %3: %3>>1-%5  %5: %3+%5>>1
    SUMSUB_BA   %1, %4, %2, %7
    ; %4: %2+%4  %2: %2-%4
    SUMSUB_BADC %1, %5, %4, %3, %2, %7
    ; %5: %2+%4 + (%3+%5>>1)
    ; %4: %2+%4 - (%3+%5>>1)
    ; %3: %2-%4 + (%3>>1-%5)
    ; %2: %2-%4 - (%3>>1-%5)
%else
%ifidn %1, w
    SUMSUBD2_AB %1, %3, %5, [%6], [%6+16]
%else
    SUMSUBD2_AB %1, %3, %5, [%6], [%6+32]
%endif
    SUMSUB_BA   %1, %4, %2
    SUMSUB_BADC %1, %5, %4, %3, %2
%endif
    SWAP %2, %5, %4
    ; %2: %2+%4 + (%3+%5>>1) row0
    ; %3: %2-%4 + (%3>>1-%5) row1
    ; %4: %2-%4 - (%3>>1-%5) row2
    ; %5: %2+%4 - (%3+%5>>1) row3
%endmacro
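
; A minimal usage sketch (illustrative, not from the original source):
; one pass of the 4-point inverse-transform butterfly over word rows
; m0..m3, using m4 and m5 as the scratch register indices:
;     IDCT4_1D w, 0, 1, 2, 3, 4, 5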

%macro STORE_DCT 6
    movq   [%5+%6+ 0], m%1
    movq   [%5+%6+ 8], m%2
    movq   [%5+%6+16], m%3
    movq   [%5+%6+24], m%4
    movhps [%5+%6+32], m%1
    movhps [%5+%6+40], m%2
    movhps [%5+%6+48], m%3
    movhps [%5+%6+56], m%4
%endmacro

%macro LOAD_DIFF_8x4P 7-10 r0,r2,0 ; 4x dest, 2x temp, 2x pointer, increment?
    LOAD_DIFF m%1, m%5, m%7, [%8],      [%9]
    LOAD_DIFF m%2, m%6, m%7, [%8+r1],   [%9+r3]
    LOAD_DIFF m%3, m%5, m%7, [%8+2*r1], [%9+2*r3]
    LOAD_DIFF m%4, m%6, m%7, [%8+r4],   [%9+r5]
%if %10
    lea %8, [%8+4*r1]
    lea %9, [%9+4*r3]
%endif
%endmacro

%macro STORE_DIFFx2 8 ; add1, add2, reg1, reg2, zero, shift, source, stride
    movh       %3, [%7]
    movh       %4, [%7+%8]
    psraw      %1, %6
    psraw      %2, %6
    punpcklbw  %3, %5
    punpcklbw  %4, %5
    paddw      %3, %1
    paddw      %4, %2
    packuswb   %3, %5
    packuswb   %4, %5
    movh     [%7], %3
    movh  [%7+%8], %4
%endmacro

%macro PMINUB 3 ; dst, src, ignored
%if cpuflag(mmxext)
    pminub   %1, %2
%else ; dst, src, tmp
    mova     %3, %1
    psubusb  %3, %2
    psubb    %1, %3
%endif
%endmacro

%macro SPLATW 2-3 0
%if cpuflag(avx2) && %3 == 0
    vpbroadcastw %1, %2
%elif mmsize == 16
    pshuflw      %1, %2, (%3)*0x55
    punpcklqdq   %1, %1
%elif cpuflag(mmxext)
    pshufw       %1, %2, (%3)*0x55
%endif
%endmacro

%macro CLIPUB 3 ; (dst, min, max)
    pmaxub %1, %2
    pminub %1, %3
%endmacro

%macro CLIPW 3 ; (dst, min, max)
    pmaxsw %1, %2
    pminsw %1, %3
%endmacro
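
; A minimal usage sketch (illustrative, not from the original source):
; clamp the words in m0 to a 10-bit pixel range, assuming the caller has
; zeros in m1 and a splat of 1023 in m2:
;     CLIPW m0, m1, m2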

%macro PMINSD 3 ; dst, src, tmp/unused
%if cpuflag(sse4)
    pminsd    %1, %2
%elif cpuflag(sse2)
    cvtdq2ps  %1, %1
    minps     %1, %2
    cvtps2dq  %1, %1
%else
    mova      %3, %2
    pcmpgtd   %3, %1
    pxor      %1, %2
    pand      %1, %3
    pxor      %1, %2
%endif
%endmacro

%macro PMAXSD 3 ; dst, src, tmp/unused
%if cpuflag(sse4)
    pmaxsd    %1, %2
%else
    mova      %3, %1
    pcmpgtd   %3, %2
    pand      %1, %3
    pandn     %3, %2
    por       %1, %3
%endif
%endmacro

%macro CLIPD 3-4
%if cpuflag(sse4)  ; src/dst, min, max, unused
    pminsd    %1, %3
    pmaxsd    %1, %2
%elif cpuflag(sse2) ; src/dst, min (float), max (float), unused
    cvtdq2ps  %1, %1
    minps     %1, %3
    maxps     %1, %2
    cvtps2dq  %1, %1
%else               ; src/dst, min, max, tmp
    PMINSD    %1, %3, %4
    PMAXSD    %1, %2, %4
%endif
%endmacro

%macro VBROADCASTSS 2 ; dst xmm/ymm, src m32/xmm
%if cpuflag(avx2)
    vbroadcastss  %1, %2
%elif cpuflag(avx)
%ifnum sizeof%2        ; avx1 register
    shufps  xmm%1, xmm%2, xmm%2, q0000
%if sizeof%1 >= 32     ; mmsize>=32
    vinsertf128  %1, %1, xmm%1, 1
%endif
%else                  ; avx1 memory
    vbroadcastss  %1, %2
%endif
%else
%ifnum sizeof%2        ; sse register
    shufps  %1, %2, %2, q0000
%else                  ; sse memory
    movss   %1, %2     ; this zeroes the other 3 elements
    shufps  %1, %1, 0
%endif
%endif
%endmacro
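
; A minimal usage sketch (illustrative, not from the original source):
; broadcast the scalar float at the address in r0 (an assumed pointer
; argument) to every lane of m0:
;     VBROADCASTSS m0, [r0]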

%macro VBROADCASTSD 2 ; dst xmm/ymm, src m64
%if cpuflag(avx) && mmsize == 32
    vbroadcastsd %1, %2
%elif cpuflag(sse3)
    movddup      %1, %2
%else ; sse2
    movsd        %1, %2
    movlhps      %1, %1
%endif
%endmacro

%macro VPBROADCASTD 2 ; dst xmm/ymm, src m32/xmm
%if cpuflag(avx2)
    vpbroadcastd %1, %2
%elif cpuflag(avx) && sizeof%1 >= 32
    %error vpbroadcastd not possible with ymm on avx1. try vbroadcastss
%else
%ifnum sizeof%2        ; sse2 register
    pshufd  %1, %2, q0000
%else                  ; sse memory
    movd    %1, %2
    pshufd  %1, %1, 0
%endif
%endif
%endmacro

%macro VBROADCASTI128 2 ; dst xmm/ymm, src : 128bit value
%if mmsize > 16
    vbroadcasti128 %1, %2
%else
    mova           %1, %2
%endif
%endmacro

%macro SHUFFLE_MASK_W 8
%rep 8
%if %1 >= 0x80
    db %1, %1
%else
    db %1*2
    db %1*2+1
%endif
    %rotate 1
%endrep
%endmacro

%macro PMOVSXWD 2 ; dst, src
%if cpuflag(sse4)
    pmovsxwd     %1, %2
%else
    punpcklwd    %1, %2
    psrad        %1, 16
%endif
%endmacro

; Wrapper for non-FMA version of fmaddps
%macro FMADDPS 4
%if cpuflag(fma3) || cpuflag(fma4)
    fmaddps %1, %2, %3, %4
%elif cpuflag(avx)
    mulps   %1, %2, %3
    addps   %1, %1, %4
%else
    mulps   %2, %3
    addps   %1, %2, %4
%endif
%endmacro

%macro MOVHL 2 ; dst, src
%ifidn %1, %2
    punpckhqdq %1, %2
%elif cpuflag(avx)
    punpckhqdq %1, %2, %2
%elif cpuflag(sse4)
    pshufd     %1, %2, q3232 ; pshufd is slow on some older CPUs, so only use it on more modern ones
%else
    movhlps    %1, %2        ; may cause an int/float domain transition and has a dependency on dst
%endif
%endmacro
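
; A minimal usage sketch (illustrative, not from the original source):
; copy the high 64 bits of xm0 into the low 64 bits of xm1:
;     MOVHL xm1, xm0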

; Horizontal Sum of Packed Single precision floats
; The resulting sum is in all elements.
%macro HSUMPS 2 ; dst/src, tmp
%if cpuflag(avx)
%if sizeof%1 >= 32 ; avx
    vperm2f128  %2, %1, %1, (0)*16+(1)
    addps       %1, %2
%endif
    shufps      %2, %1, %1, q1032
    addps       %1, %2
    shufps      %2, %1, %1, q0321
    addps       %1, %2
%else ; this form is a bit faster than the short avx-like emulation.
    movaps      %2, %1
    shufps      %1, %1, q1032
    addps       %1, %2
    movaps      %2, %1
    shufps      %1, %1, q0321
    addps       %1, %2
    ; all %1 members should be equal for as long as float a+b==b+a
%endif
%endmacro
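
; A minimal usage sketch (illustrative, not from the original source):
; sum the packed floats in m0, leaving the total in every lane of m0,
; with m1 as scratch:
;     HSUMPS m0, m1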

; Emulate blendvps if not available
;
; src_b is destroyed when using emulation with logical operands
; SSE41 blendv instruction is hard coded to use xmm0 as mask
%macro BLENDVPS 3 ; dst/src_a, src_b, mask
%if cpuflag(avx)
    blendvps %1, %1, %2, %3
%elif cpuflag(sse4)
%ifnidn %3, xmm0
    %error sse41 blendvps uses xmm0 as default 3rd operand, you used %3
%endif
    blendvps %1, %2, %3
%else
    xorps  %2, %1
    andps  %2, %3
    xorps  %1, %2
%endif
%endmacro
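
; A minimal usage sketch (illustrative, not from the original source):
; per-lane select between m1 and m2. The SSE4 path requires the mask to
; live in xmm0, so keep it there for portability:
;     BLENDVPS m1, m2, xmm0 ; m1 = mask ? m2 : m1
; Note that m2 is clobbered by the pre-SSE4 logical emulation.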

; Emulate pblendvb if not available
;
; src_b is destroyed when using emulation with logical operands
; SSE41 blendv instruction is hard coded to use xmm0 as mask
%macro PBLENDVB 3 ; dst/src_a, src_b, mask
%if cpuflag(avx)
%if cpuflag(avx) && notcpuflag(avx2) && sizeof%1 >= 32
    %error pblendvb not possible with ymm on avx1, try blendvps.
%endif
    pblendvb %1, %1, %2, %3
%elif cpuflag(sse4)
%ifnidn %3, xmm0
    %error sse41 pblendvb uses xmm0 as default 3rd operand, you used %3
%endif
    pblendvb %1, %2, %3
%else
    pxor  %2, %1
    pand  %2, %3
    pxor  %1, %2
%endif
%endmacro