;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%ifndef private_prefix
    %define private_prefix x264
%endif
%ifndef public_prefix
    %define public_prefix private_prefix
%endif
%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) _ %+ x
; Name of the .rodata section.
%macro SECTION_RODATA 0-1 16
    ; Kludge: Something on OS X fails to align .rodata even given an align
    ; attribute, so use a different read-only section. This has been fixed in
    ; yasm 0.8.0 and nasm 2.6.
    %ifdef __YASM_VERSION_ID__
        %if __YASM_VERSION_ID__ < 00080000h
            %define NEED_MACHO_RODATA_KLUDGE
        %endif
    %elifdef __NASM_VERSION_ID__
        %if __NASM_VERSION_ID__ < 02060000h
            %define NEED_MACHO_RODATA_KLUDGE
        %endif
    %endif

    %ifidn __OUTPUT_FORMAT__,aout

    %ifndef NEED_MACHO_RODATA_KLUDGE
        SECTION .rodata align=%1
    %else
        %ifidn __OUTPUT_FORMAT__,macho64
            SECTION .text align=%1
        %elifidn __OUTPUT_FORMAT__,macho
            SECTION .text align=%1

            SECTION .rodata align=%1

    %undef NEED_MACHO_RODATA_KLUDGE
; aout does not support align=
%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
    SECTION .text align=%1

%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
; Always use long nops (reduces 0x90 spam in disassembly on x86_32)

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
;      MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
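; A second, purely illustrative declaration (not from x264 itself):
; cglobal bar, 4,5,8,64, dst, src, w, h, tmp
; would load 4 args, use 5 GPRs (tmp being the extra one) and 8 xmm registers,
; and allocate 64 bytes of stack space.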
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.
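; For example (illustrative), in a function where PROLOGUE pushed nothing:
;   jz .end
;   ...
; .end:
;   REP_RET ; emitted as the 2-byte "rep ret", since this ret follows a branch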
; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
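; For example (illustrative; the actual mapping depends on the ABI), if r0
; maps to rax:
;   mov   r0d, r1d  ; operate on the dword views of args 0 and 1
;   movzx r0d, r1b  ; zero-extend the byte-size view of arg 1
;   mov   r2, r2mp  ; reload arg 2 from its original location at native size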
%macro DECLARE_REG 2-3
    %elif ARCH_X86_64 ; memory
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp qword r %+ %1 %+ m
    %else
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp dword r %+ %1 %+ m

%macro DECLARE_REG_SIZE 3

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    CAT_XDEFINE t, %%i, r%1

%macro DECLARE_REG_TMP_SIZE 0-*
    %define t%1q t%1 %+ q
    %define t%1d t%1 %+ d
    %define t%1w t%1 %+ w
    %define t%1h t%1 %+ h
    %define t%1b t%1 %+ b

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
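; e.g. a hypothetical "DECLARE_REG_TMP 2,0,1" makes t0==r2, t1==r0, t2==r1,
; and DECLARE_REG_TMP_SIZE above provides the matching t0q/t0d/t0w/t0h/t0b names.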
%assign stack_offset stack_offset+gprsize
%assign stack_offset stack_offset-gprsize

%macro PUSH_IF_USED 1-*
%macro POP_IF_USED 1-*
%macro LOAD_IF_USED 1-*
    mov r%1, r %+ %1 %+ mp

%assign stack_offset stack_offset+(%2)
%assign stack_offset stack_offset-(%2)

%macro movsxdifnidn 2

%macro DEFINE_ARGS 0-*
    CAT_UNDEF arg_name %+ %%i, q
    CAT_UNDEF arg_name %+ %%i, d
    CAT_UNDEF arg_name %+ %%i, w
    CAT_UNDEF arg_name %+ %%i, h
    CAT_UNDEF arg_name %+ %%i, b
    CAT_UNDEF arg_name %+ %%i, m
    CAT_UNDEF arg_name %+ %%i, mp
    CAT_UNDEF arg_name, %%i
    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %xdefine %1q r %+ %%i %+ q
    %xdefine %1d r %+ %%i %+ d
    %xdefine %1w r %+ %%i %+ w
    %xdefine %1h r %+ %%i %+ h
    %xdefine %1b r %+ %%i %+ b
    %xdefine %1m r %+ %%i %+ m
    %xdefine %1mp r %+ %%i %+ mp
    CAT_XDEFINE arg_name, %%i, %1
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
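; e.g. a hypothetical "DEFINE_ARGS dst, src, len" aliases dstq->r0, srcq->r1
; and lenq->r2, along with the d/w/h/b/m/mp size variants of each name.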
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
    %assign %%stack_alignment ((mmsize + 15) & ~15)
    %assign stack_size %1
    %if stack_size < 0
        %assign stack_size -stack_size
    %endif
    %assign xmm_regs_used %2
    %if mmsize <= 16 && HAVE_ALIGNED_STACK
        %assign stack_size_padded stack_size + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
        %if xmm_regs_used > 6
            %assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
        %endif
        SUB rsp, stack_size_padded
    %else
        %assign %%reg_num (regs_used - 1)
        %xdefine rstk r %+ %%reg_num
        ; align stack, and save original stack location directly above
        ; it, i.e. in [rsp+stack_size_padded], so we can restore the
        ; stack in a single instruction (i.e. mov rsp, rstk or mov
        ; rsp, [rsp+stack_size_padded])
        %assign stack_size_padded stack_size
        %if xmm_regs_used > 6
            %assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
            %if mmsize == 32 && xmm_regs_used & 1
                ; re-align to 32 bytes
                %assign stack_size_padded (stack_size_padded + 16)
            %endif
        %endif
        %if %1 < 0 ; need to store rsp on stack
            sub rsp, gprsize+stack_size_padded
            and rsp, ~(%%stack_alignment-1)
            %xdefine rstkm [rsp+stack_size_padded]
        %else ; can keep rsp in rstk during whole function
            sub rsp, stack_size_padded
            and rsp, ~(%%stack_alignment-1)

%if xmm_regs_used > 6
%macro SETUP_STACK_POINTER 1
    %if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
        %assign regs_used (regs_used + 1)
    %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
        %warning "Stack pointer will overwrite register argument"

%macro DEFINE_ARGS_INTERNAL 3+

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    %if mmsize != 8 && stack_size == 0
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5

%macro WIN64_PUSH_XMM 0
    %assign %%i xmm_regs_used
    %rep (xmm_regs_used-6)
        movdqa [rsp + (%%i-6)*16 + stack_size + (~stack_offset&8)], xmm %+ %%i

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 6
        SUB rsp, (xmm_regs_used-6)*16+16

%macro WIN64_RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+stack_size+(~stack_offset&8)]
        %if stack_size_padded == 0
            add %1, (xmm_regs_used-6)*16+16
    %if stack_size_padded > 0
        %if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
        add %1, stack_size_padded

%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %assign stack_offset (stack_offset-stack_size_padded)
    %assign xmm_regs_used 0
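; e.g. "WIN64_SPILL_XMM 8" (illustrative) reserves stack space and saves the
; callee-saved xmm6 and xmm7 out of the 8 registers declared;
; "WIN64_RESTORE_XMM rsp" reloads them and releases that space.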
%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0

%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0

%macro RET 0
    %if stack_size_padded > 0
        %if mmsize == 32 || HAVE_ALIGNED_STACK == 0
        add rsp, stack_size_padded
    POP_IF_USED 14, 13, 12, 11, 10, 9

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28

%macro DECLARE_ARG 1-*
    %define r%1m [rstk + stack_offset + 4*%1 + 4]
    %define r%1mp dword r%1m

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 7
    PUSH_IF_USED 3, 4, 5, 6
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0

%macro RET 0
    %if stack_size_padded > 0
        %if mmsize == 32 || HAVE_ALIGNED_STACK == 0
        add rsp, stack_size_padded
    POP_IF_USED 6, 5, 4, 3

%endif ;======================================================================

%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1
%macro WIN64_PUSH_XMM 0

%macro TAIL_CALL 2 ; callee, is_nonadjacent

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 1, %1 %+ SUFFIX, %2
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 0, %1 %+ SUFFIX, %2
%macro cglobal_internal 2-3+
    %xdefine %%FUNCTION_PREFIX private_prefix
    %xdefine %%VISIBILITY hidden
    %xdefine %%FUNCTION_PREFIX public_prefix
    %xdefine %%VISIBILITY
    %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
    %xdefine %2.skip_prologue %2 %+ .skip_prologue
    CAT_XDEFINE cglobaled_, %2, 1
    %xdefine current_function %2
    %ifidn __OUTPUT_FORMAT__,elf
        global %2:function %%VISIBILITY
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %assign stack_offset 0
    %assign stack_size_padded 0
    %assign xmm_regs_used 0

%macro cextern 1
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1

; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    CAT_XDEFINE cglobaled_, %1, 1

%xdefine %1 mangle(private_prefix %+ _ %+ %1)
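; e.g. "cextern log2_tab" (illustrative) makes later uses of log2_tab refer
; to mangle(x264_log2_tab) via the cextern macro above, assuming
; private_prefix retains its default.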
; This is needed for ELF; otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10) | cpuflags_sse4
%assign cpuflags_avx      (1<<11) | cpuflags_sse42
%assign cpuflags_xop      (1<<12) | cpuflags_avx
%assign cpuflags_fma4     (1<<13) | cpuflags_avx
%assign cpuflags_avx2     (1<<14) | cpuflags_avx
%assign cpuflags_fma3     (1<<15) | cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)
%assign cpuflags_bmi1     (1<<23)
%assign cpuflags_bmi2     (1<<24) | cpuflags_bmi1
%assign cpuflags_tbm      (1<<25) | cpuflags_bmi1

%define cpuflag(x)    ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
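; e.g. since each flag implies its prerequisites, a guard such as
; "%if cpuflag(ssse3) ... %endif" is also true when building the sse4 or avx
; variant of a function.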
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-2
    %assign cpuflags cpuflags_%1
    %xdefine cpuname %1_%2
    %assign cpuflags cpuflags | cpuflags_%2
    %xdefine SUFFIX _ %+ cpuname
    %assign avx_enabled 1
    %if mmsize == 16 && notcpuflag(sse2)
    %define movnta movntps

%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define movnta movntq
    CAT_XDEFINE m, %%i, mm %+ %%i
    CAT_XDEFINE nmm, %%i, %%i

%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define num_mmregs 16
    %define movnta movntdq
    CAT_XDEFINE m, %%i, xmm %+ %%i
    CAT_XDEFINE nxmm, %%i, %%i

; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%macro INIT_AVX 0
    %assign avx_enabled 1
    %define PALIGNR PALIGNR_SSSE3
    %define RESET_MM_PERMUTATION INIT_AVX

%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define num_mmregs 16
    %define movnta vmovntps
    CAT_XDEFINE m, %%i, ymm %+ %%i
    CAT_XDEFINE nymm, %%i, %%i
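; e.g. "INIT_XMM sse2" selects 16-byte mmsize, maps the m# names onto xmm
; registers, and sets SUFFIX to _sse2 for subsequent cglobal names.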
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; registers.

; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.

; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
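; For example (illustrative): after "SWAP 0, 1", the name m0 refers to the
; register previously called m1 and vice versa; no instructions are emitted.
; A chain like "SWAP 0, 1, 2" renames m0<-m1, m1<-m2, m2<-m0.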
%macro PERMUTE 2-* ; takes a list of pairs to swap

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
    CAT_XDEFINE n, m%1, %1
    CAT_XDEFINE n, m%2, %2
    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
    ; Be careful using this mode in nested macros though, as in some cases there may be
    ; other copies of m# that have already been dereferenced and don't get updated correctly.
    %xdefine %%n1 n %+ %1
    %xdefine %%n2 n %+ %2
    %xdefine tmp m %+ %%n1
    CAT_XDEFINE m, %%n1, m %+ %%n2
    CAT_XDEFINE m, %%n2, tmp
    CAT_XDEFINE n, m %+ %%n1, %%n1
    CAT_XDEFINE n, m %+ %%n2, %%n2

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
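; For example (illustrative): a helper that ends with SAVE_MM_PERMUTATION can
; be invoked through the call macro below, after which the caller's m# names
; match whatever permutation the helper left its results in.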
%macro SAVE_MM_PERMUTATION 0-1
    %xdefine %%f current_function %+ _m
    CAT_XDEFINE %%f, %%i, m %+ %%i

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    CAT_XDEFINE m, %%i, %1_m %+ %%i
    CAT_XDEFINE n, m %+ %%i, %%i

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    call_internal %1 %+ SUFFIX, %1

%macro call_internal 2
    LOAD_MM_PERMUTATION %%i
; Substitutions that reduce instruction size but are functionally equivalent

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 16
    %if i < 8
        CAT_XDEFINE sizeofmm, i, 8
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
    %assign i i+1
%endrep

%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
    %error non-avx emulation of ``%%opcode'' is not supported

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
%macro RUN_AVX_INSTR 6-7+
    %define %%sizeofreg sizeof%6
    %define %%sizeofreg sizeof%5
    %define %%sizeofreg mmsize
    %define %%regmov movq
    %define %%regmov movaps
    %define %%regmov movdqa
    %if avx_enabled && %%sizeofreg==16
        CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7

; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
%macro RUN_AVX_INSTR1 8
    %if %%swap && %3 == 0 && %8 == 1
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
        RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7

;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
    %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
        RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
        RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
        RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
        RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
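; e.g. with "AVX_INSTR addps, 1, 0, 1" in effect (see the list below),
;   addps m0, m1, m2
; emits "vaddps xmm0, xmm1, xmm2" when avx_enabled, and is emulated on SSE as
;   movaps xmm0, xmm1
;   addps  xmm0, xmm2
; (a sketch; actual registers depend on the current m# mapping).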
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 0, 0
AVX_INSTR cmpps, 1, 0, 0
AVX_INSTR cmpsd, 1, 0, 0
AVX_INSTR cmpss, 1, 0, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtpd2dq, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR pabsb, 0, 0, 0
AVX_INSTR pabsw, 0, 0, 0
AVX_INSTR pabsd, 0, 0, 0
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmovmskb, 0, 0, 0
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR pshufd, 0, 1, 0
AVX_INSTR pshufhw, 0, 1, 0
AVX_INSTR pshuflw, 0, 1, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR ptest, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1

; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1
; base-4 constants for shuffles
%assign i 0
%rep 256
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    %if j < 10
        CAT_XDEFINE q000, j, i
    %elif j < 100
        CAT_XDEFINE q00, j, i
    %elif j < 1000
        CAT_XDEFINE q0, j, i
    %else
        CAT_XDEFINE q, j, i
    %endif
    %assign i i+1
%endrep
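; e.g. q3120 expands to 11011000b (0xD8), so "pshufd m0, m1, q3120" copies
; source dwords 0,2,1,3 into destination dwords 0,1,2,3 respectively.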
%macro FMA_INSTR 3
    %macro %1 5-8 %1, %2, %3
        %if cpuflag(xop) || cpuflag(fma4)

FMA_INSTR  fmaddps,   mulps, addps
FMA_INSTR  pmacsdd,  pmulld, paddd
FMA_INSTR  pmacsww,  pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
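; e.g. "fmaddps m0, m1, m2, m3" (illustrative) becomes a single 4-operand
; XOP/FMA4 instruction where available, and otherwise falls back to the listed
; mulps+addps pair (the exact fallback sequence is in the elided macro body).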
; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
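; e.g. "tzcnt eax, ecx" assembles here to the same bytes as "rep bsf eax, ecx";
; BMI-capable CPUs execute it as tzcnt, older CPUs as plain bsf.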