;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2013 x264 project
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Fiona Glaser <fiona@x264.com>
;*          Henrik Gramner <henrik@gramner.com>
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264asm assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.
; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org.
%ifndef private_prefix
%define private_prefix x264
%define public_prefix private_prefix
%if HAVE_ALIGNED_STACK
%define STACK_ALIGNMENT 16
%ifndef STACK_ALIGNMENT
%define STACK_ALIGNMENT 16
%define STACK_ALIGNMENT 4
%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%elifidn __OUTPUT_FORMAT__,x64
%define mangle(x) _ %+ x
; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .rodata align=%1
%macro SECTION_TEXT 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .text align=%1
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. The stack will be aligned before
;      allocating the specified stack size. If the required stack alignment is
;      larger than the known stack alignment the stack will be manually aligned
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal
; cglobal foo, 2,3,7,0x40, dst, src, tmp
; declares a function (foo) that automatically loads two arguments (dst and
; src) into registers, uses one additional register (tmp) plus 7 vector
; registers (m0-m6) and allocates 0x40 bytes of stack space.
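; As an illustrative variant (the function name "bar" is hypothetical):
; cglobal bar, 2,3,7,-0x40, dst, src, tmp
; requests the same stack space, but the negative size asks PROLOGUE to spill
; the original stack pointer to memory rather than dedicating an extra
; register to it when manual alignment is needed.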
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
; RET: pops anything that was pushed by PROLOGUE, and returns.
; REP_RET: use this instead of RET if it's a branch target.
; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
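; For example, on unix64 (where argument 0 arrives in rdi per the SysV ABI):
; r0 is rdi, r0d is edi, r0w is di and r0b is dil, while r0m still refers to
; wherever the caller originally passed argument 0.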
%macro DECLARE_REG 2-3
%elif ARCH_X86_64 ; memory
%define r%1m [rstk + stack_offset + %3]
%define r%1mp qword r %+ %1 %+ m
%define r%1m [rstk + stack_offset + %3]
%define r%1mp dword r %+ %1 %+ m
%macro DECLARE_REG_SIZE 3
DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null
; t# defines for when per-arch register allocation is more complex than just function arguments
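; e.g. (illustrative): DECLARE_REG_TMP 2, 0, 1 makes t0 an alias for r2, t1
; for r0 and t2 for r1, so one function body can use t-names while each arch
; picks its own underlying registers.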
%macro DECLARE_REG_TMP 1-*
CAT_XDEFINE t, %%i, r%1
%macro DECLARE_REG_TMP_SIZE 0-*
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1h t%1 %+ h
%define t%1b t%1 %+ b
DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
%assign stack_offset stack_offset+gprsize
%assign stack_offset stack_offset-gprsize
%macro PUSH_IF_USED 1-*
%macro POP_IF_USED 1-*
%macro LOAD_IF_USED 1-*
mov r%1, r %+ %1 %+ mp
%assign stack_offset stack_offset+(%2)
%assign stack_offset stack_offset-(%2)
%macro movsxdifnidn 2
%macro DEFINE_ARGS 0-*
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, h
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i
%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1h r %+ %%i %+ h
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1
%xdefine stack_offset %%stack_offset
%assign n_arg_names %0
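; Usage sketch (argument names are illustrative): after PROLOGUE,
; DEFINE_ARGS dst, src, len
; makes dstq/dstd/dstm aliases for r0q/r0d/r0m, srcq for r1q, lenq for r2q,
; and so on for the w/h/b/mp size variants.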
%define required_stack_alignment ((mmsize + 15) & ~15)
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
%assign stack_size %1
%assign stack_size -stack_size
%assign %%pad %%pad + 32 ; shadow space
%assign xmm_regs_used %2
%if xmm_regs_used > 8
%assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
%if required_stack_alignment <= STACK_ALIGNMENT
; maintain the current stack alignment
%assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%assign %%reg_num (regs_used - 1)
%xdefine rstk r %+ %%reg_num
; align stack, and save original stack location directly above
; it, i.e. in [rsp+stack_size_padded], so we can restore the
; stack in a single instruction (i.e. mov rsp, rstk or mov
; rsp, [rsp+stack_size_padded])
%if %1 < 0 ; need to store rsp on stack
%xdefine rstkm [rsp + stack_size + %%pad]
%assign %%pad %%pad + gprsize
%else ; can keep rsp in rstk during whole function
%assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
and rsp, ~(required_stack_alignment-1)
sub rsp, stack_size_padded
movifnidn rstkm, rstk
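; Sketch of direct use (sizes illustrative): inside a function declared with
; no stack reservation, "ALLOC_STACK 32" reserves 32 bytes of scratch space,
; padded so the chosen alignment is preserved; "ALLOC_STACK -32" does the
; same but spills the old rsp to memory instead of tying up a register.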
%macro SETUP_STACK_POINTER 1
%if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
%assign regs_used (regs_used + 1)
%elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
%warning "Stack pointer will overwrite register argument"
%macro DEFINE_ARGS_INTERNAL 3+
%if WIN64 ; Windows x64 ;=================================================
DECLARE_REG 4, R10, 40
DECLARE_REG 5, R11, 48
DECLARE_REG 6, rax, 56
DECLARE_REG 7, rdi, 64
DECLARE_REG 8, rsi, 72
DECLARE_REG 9, rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120
%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
%if mmsize != 8 && stack_size == 0
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5
%macro WIN64_PUSH_XMM 0
; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
%if xmm_regs_used > 6
movaps [rstk + stack_offset + 8], xmm6
%if xmm_regs_used > 7
movaps [rstk + stack_offset + 24], xmm7
%if xmm_regs_used > 8
movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 8
; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
%assign %%pad (xmm_regs_used-8)*16 + 32
%assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%macro WIN64_RESTORE_XMM_INTERNAL 1
%if xmm_regs_used > 8
%assign %%i xmm_regs_used
movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32]
%if stack_size_padded > 0
%if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
add %1, stack_size_padded
%assign %%pad_size stack_size_padded
%if xmm_regs_used > 7
movaps xmm7, [%1 + stack_offset - %%pad_size + 24]
%if xmm_regs_used > 6
movaps xmm6, [%1 + stack_offset - %%pad_size + 8]
%macro WIN64_RESTORE_XMM 1
WIN64_RESTORE_XMM_INTERNAL %1
%assign stack_offset (stack_offset-stack_size_padded)
%assign xmm_regs_used 0
%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%elif ARCH_X86_64 ; *nix x64 ;=============================================
DECLARE_REG 6, rax, 8
DECLARE_REG 7, R10, 16
DECLARE_REG 8, R11, 24
DECLARE_REG 9, rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72
%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5
%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
%if stack_size_padded > 0
%if required_stack_alignment > STACK_ALIGNMENT
add rsp, stack_size_padded
POP_IF_USED 14, 13, 12, 11, 10, 9
%else ; X86_32 ;==============================================================
DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%macro DECLARE_ARG 1-*
%define r%1m [rstk + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m
DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 7
PUSH_IF_USED 3, 4, 5, 6
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS_INTERNAL %0, %4, %5
%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
%if stack_size_padded > 0
%if required_stack_alignment > STACK_ALIGNMENT
add rsp, stack_size_padded
POP_IF_USED 6, 5, 4, 3
%endif ;======================================================================
%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1
%macro WIN64_PUSH_XMM 0
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
%define last_branch_adr $$
%macro AUTO_REP_RET 0
times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr.
%elif notcpuflag(ssse3)
times ((last_branch_adr-$)>>31)+1 rep
%macro BRANCH_INSTR 0-*
%xdefine last_branch_adr %%branch_instr
BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
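; e.g. (illustrative loop):
; .loop:
;     dec r2d
;     jg .loop
;     REP_RET ; branch target, so an ordinary RET could stall on AMD <=K10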
%macro TAIL_CALL 2 ; callee, is_nonadjacent
;=============================================================================
; arch-independent part
;=============================================================================
%assign function_align 16
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
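; e.g. (function name illustrative): with private_prefix x264 and
; INIT_XMM sse2 in effect, "cglobal dct4x4, 2,3,4" declares the symbol
; x264_dct4x4_sse2, and subsequent references to dct4x4_sse2 resolve to
; that mangled name.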
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 1, %1 %+ SUFFIX, %2
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 0, %1 %+ SUFFIX, %2
%macro cglobal_internal 2-3+
%xdefine %%FUNCTION_PREFIX private_prefix
%xdefine %%VISIBILITY hidden
%xdefine %%FUNCTION_PREFIX public_prefix
%xdefine %%VISIBILITY
%xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
%xdefine %2.skip_prologue %2 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %2, 1
%xdefine current_function %2
%ifidn __OUTPUT_FORMAT__,elf
global %2:function %%VISIBILITY
RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
%xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
%assign stack_offset 0 ; stack pointer offset relative to the return address
%assign stack_size 0 ; amount of stack space that can be freely used inside a function
%assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
%assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1
; like cextern, but without the prefix
%macro cextern_naked 1
%xdefine %1 mangle(%1)
CAT_XDEFINE cglobaled_, %1, 1
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
%ifidn __OUTPUT_FORMAT__,elf
global %1:data hidden
; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
[section .note.GNU-stack noalloc noexec nowrite progbits]
; Overrides the default .text section.
; Silences warnings when defining structures.
%assign cpuflags_mmx (1<<0)
%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2 (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
%assign cpuflags_avx (1<<11)| cpuflags_sse42
%assign cpuflags_xop (1<<12)| cpuflags_avx
%assign cpuflags_fma4 (1<<13)| cpuflags_avx
%assign cpuflags_avx2 (1<<14)| cpuflags_avx
%assign cpuflags_fma3 (1<<15)| cpuflags_avx
%assign cpuflags_cache32 (1<<16)
%assign cpuflags_cache64 (1<<17)
%assign cpuflags_slowctz (1<<18)
%assign cpuflags_lzcnt (1<<19)
%assign cpuflags_aligned (1<<20) ; not a cpu feature, but a function variant
%assign cpuflags_atom (1<<21)
%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1
%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
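; e.g. (illustrative) these tests let one source build per-cpu variants:
; %if cpuflag(ssse3)
;     pshufb m0, m2
; %else
;     ; fallback shuffle for pre-SSSE3 cpus
; %endif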
; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-*
%xdefine cpuname cpuname %+ _%1
%assign cpuflags cpuflags | cpuflags_%1
%xdefine SUFFIX _ %+ cpuname
%assign avx_enabled 1
%if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
%define movnta movntps
%elif cpuflag(sse3) && notcpuflag(ssse3)
; m# is a simd register of the currently selected size
; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
; (All 3 remain in sync through SWAP.)
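; e.g. after INIT_YMM avx2, m0 names ymm0 while xm0 names xmm0, so a single
; function body can mix full-width and lane-width operations on the same
; logical register.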
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define movnta movntq
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nnmm, %%i, %%i
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define num_mmregs 16
%define movnta movntdq
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nnxmm, %%i, %%i
%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define num_mmregs 16
%define movnta movntdq
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nnymm, %%i, %%i
%macro DECLARE_MMCAST 1
%define xmmxmm%1 xmm%1
%define xmmymm%1 xmm%1
%define ymmxmm%1 xmm%1
%define ymmymm%1 ymm%1
%define xm%1 xmm %+ m%1
%define ym%1 ymm %+ m%1
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; registers.
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
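; e.g. (illustrative): after "SWAP 0, 1", code that writes to m0 actually uses
; the register previously named m1, and vice versa; no mov is emitted.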
%macro PERMUTE 2-* ; takes a list of pairs to swap
CAT_XDEFINE nn, m%1, %1
%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
%ifnum %1 ; SWAP 0, 1, ...
SWAP_INTERNAL_NUM %1, %2
%else ; SWAP m0, m1, ...
SWAP_INTERNAL_NAME %1, %2
%macro SWAP_INTERNAL_NUM 2-*
CAT_XDEFINE nn, m%1, %1
CAT_XDEFINE nn, m%2, %2
%macro SWAP_INTERNAL_NAME 2-*
%xdefine %%args nn %+ %1
%xdefine %%args %%args, nn %+ %2
SWAP_INTERNAL_NUM %%args
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
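; Sketch (helper name illustrative): end a helper function with
; SAVE_MM_PERMUTATION; a later "call helper" then restores that permutation
; in the caller via LOAD_MM_PERMUTATION, so results left in renamed mmregs
; stay addressable under the same m# names.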
%macro SAVE_MM_PERMUTATION 0-1
%xdefine %%f current_function %+ _m
CAT_XDEFINE %%f, %%i, m %+ %%i
%macro LOAD_MM_PERMUTATION 1 ; name to load from
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE nn, m %+ %%i, %%i
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
call_internal %1 %+ SUFFIX, %1
%macro call_internal 2
%ifndef cglobaled_%2
LOAD_MM_PERMUTATION %%i
; Substitutions that reduce instruction size but are functionally equivalent
;=============================================================================
; AVX abstraction layer
;=============================================================================
CAT_XDEFINE sizeofmm, i, 8
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
%macro CHECK_AVX_INSTR_EMU 3-*
%xdefine %%opcode %1
%error non-avx emulation of ``%%opcode'' is not supported
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro RUN_AVX_INSTR 6-9+
%assign __sizeofreg sizeof%7
%assign __sizeofreg sizeof%6
%assign __sizeofreg mmsize
%assign __emulate_avx 0
%if avx_enabled && __sizeofreg >= 16
%xdefine __instr v%1
%assign __emulate_avx 1
%error use of ``%1'' %2 instruction in cpuname function: current_function
%elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
%error use of ``%1'' sse2 instruction in cpuname function: current_function
CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, %8, %9
CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, %8
; 3-operand AVX instructions with a memory arg can only have it in src2,
; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
; So, if the instruction is commutative with a memory arg, swap them.
%if __sizeofreg == 8
__instr %6, %7, %8, %9
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-5 fnord, 0, 1, 0
%macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
; Instructions with both VEX and non-VEX encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, sse2, 1, 0, 1
AVX_INSTR addps, sse, 1, 0, 1
AVX_INSTR addsd, sse2, 1, 0, 1
AVX_INSTR addss, sse, 1, 0, 1
AVX_INSTR addsubpd, sse3, 1, 0, 0
AVX_INSTR addsubps, sse3, 1, 0, 0
AVX_INSTR aesdec, fnord, 0, 0, 0
AVX_INSTR aesdeclast, fnord, 0, 0, 0
AVX_INSTR aesenc, fnord, 0, 0, 0
AVX_INSTR aesenclast, fnord, 0, 0, 0
AVX_INSTR aeskeygenassist
AVX_INSTR andnpd, sse2, 1, 0, 0
AVX_INSTR andnps, sse, 1, 0, 0
AVX_INSTR andpd, sse2, 1, 0, 1
AVX_INSTR andps, sse, 1, 0, 1
AVX_INSTR blendpd, sse4, 1, 0, 0
AVX_INSTR blendps, sse4, 1, 0, 0
AVX_INSTR blendvpd, sse4, 1, 0, 0
AVX_INSTR blendvps, sse4, 1, 0, 0
AVX_INSTR cmppd, sse2, 1, 1, 0
AVX_INSTR cmpps, sse, 1, 1, 0
AVX_INSTR cmpsd, sse2, 1, 1, 0
AVX_INSTR cmpss, sse, 1, 1, 0
AVX_INSTR comisd, sse2
AVX_INSTR comiss, sse
AVX_INSTR cvtdq2pd, sse2
AVX_INSTR cvtdq2ps, sse2
AVX_INSTR cvtpd2dq, sse2
AVX_INSTR cvtpd2ps, sse2
AVX_INSTR cvtps2dq, sse2
AVX_INSTR cvtps2pd, sse2
AVX_INSTR cvtsd2si, sse2
AVX_INSTR cvtsd2ss, sse2
AVX_INSTR cvtsi2sd, sse2
AVX_INSTR cvtsi2ss, sse
AVX_INSTR cvtss2sd, sse2
AVX_INSTR cvtss2si, sse
AVX_INSTR cvttpd2dq, sse2
AVX_INSTR cvttps2dq, sse2
AVX_INSTR cvttsd2si, sse2
AVX_INSTR cvttss2si, sse
AVX_INSTR divpd, sse2, 1, 0, 0
AVX_INSTR divps, sse, 1, 0, 0
AVX_INSTR divsd, sse2, 1, 0, 0
AVX_INSTR divss, sse, 1, 0, 0
AVX_INSTR dppd, sse4, 1, 1, 0
AVX_INSTR dpps, sse4, 1, 1, 0
AVX_INSTR extractps, sse4
AVX_INSTR haddpd, sse3, 1, 0, 0
AVX_INSTR haddps, sse3, 1, 0, 0
AVX_INSTR hsubpd, sse3, 1, 0, 0
AVX_INSTR hsubps, sse3, 1, 0, 0
AVX_INSTR insertps, sse4, 1, 1, 0
AVX_INSTR lddqu, sse3
AVX_INSTR ldmxcsr, sse
AVX_INSTR maskmovdqu, sse2
AVX_INSTR maxpd, sse2, 1, 0, 1
AVX_INSTR maxps, sse, 1, 0, 1
AVX_INSTR maxsd, sse2, 1, 0, 1
AVX_INSTR maxss, sse, 1, 0, 1
AVX_INSTR minpd, sse2, 1, 0, 1
AVX_INSTR minps, sse, 1, 0, 1
AVX_INSTR minsd, sse2, 1, 0, 1
AVX_INSTR minss, sse, 1, 0, 1
AVX_INSTR movapd, sse2
AVX_INSTR movaps, sse
AVX_INSTR movddup, sse3
AVX_INSTR movdqa, sse2
AVX_INSTR movdqu, sse2
AVX_INSTR movhlps, sse, 1, 0, 0
AVX_INSTR movhpd, sse2, 1, 0, 0
AVX_INSTR movhps, sse, 1, 0, 0
AVX_INSTR movlhps, sse, 1, 0, 0
AVX_INSTR movlpd, sse2, 1, 0, 0
AVX_INSTR movlps, sse, 1, 0, 0
AVX_INSTR movmskpd, sse2
AVX_INSTR movmskps, sse
AVX_INSTR movntdq, sse2
AVX_INSTR movntdqa, sse4
AVX_INSTR movntpd, sse2
AVX_INSTR movntps, sse
AVX_INSTR movsd, sse2, 1, 0, 0
AVX_INSTR movshdup, sse3
AVX_INSTR movsldup, sse3
AVX_INSTR movss, sse, 1, 0, 0
AVX_INSTR movupd, sse2
AVX_INSTR movups, sse
AVX_INSTR mpsadbw, sse4
AVX_INSTR mulpd, sse2, 1, 0, 1
AVX_INSTR mulps, sse, 1, 0, 1
AVX_INSTR mulsd, sse2, 1, 0, 1
AVX_INSTR mulss, sse, 1, 0, 1
AVX_INSTR orpd, sse2, 1, 0, 1
AVX_INSTR orps, sse, 1, 0, 1
AVX_INSTR pabsb, ssse3
AVX_INSTR pabsd, ssse3
AVX_INSTR pabsw, ssse3
AVX_INSTR packsswb, mmx, 0, 0, 0
AVX_INSTR packssdw, mmx, 0, 0, 0
AVX_INSTR packuswb, mmx, 0, 0, 0
AVX_INSTR packusdw, sse4, 0, 0, 0
AVX_INSTR paddb, mmx, 0, 0, 1
AVX_INSTR paddw, mmx, 0, 0, 1
AVX_INSTR paddd, mmx, 0, 0, 1
AVX_INSTR paddq, sse2, 0, 0, 1
AVX_INSTR paddsb, mmx, 0, 0, 1
AVX_INSTR paddsw, mmx, 0, 0, 1
AVX_INSTR paddusb, mmx, 0, 0, 1
AVX_INSTR paddusw, mmx, 0, 0, 1
AVX_INSTR palignr, ssse3
AVX_INSTR pand, mmx, 0, 0, 1
AVX_INSTR pandn, mmx, 0, 0, 0
AVX_INSTR pavgb, mmx2, 0, 0, 1
AVX_INSTR pavgw, mmx2, 0, 0, 1
AVX_INSTR pblendvb, sse4, 0, 0, 0
AVX_INSTR pblendw, sse4
AVX_INSTR pcmpestri, sse42
AVX_INSTR pcmpestrm, sse42
AVX_INSTR pcmpistri, sse42
AVX_INSTR pcmpistrm, sse42
AVX_INSTR pcmpeqb, mmx, 0, 0, 1
AVX_INSTR pcmpeqw, mmx, 0, 0, 1
AVX_INSTR pcmpeqd, mmx, 0, 0, 1
AVX_INSTR pcmpeqq, sse4, 0, 0, 1
AVX_INSTR pcmpgtb, mmx, 0, 0, 0
AVX_INSTR pcmpgtw, mmx, 0, 0, 0
AVX_INSTR pcmpgtd, mmx, 0, 0, 0
AVX_INSTR pcmpgtq, sse42, 0, 0, 0
AVX_INSTR pextrb, sse4
AVX_INSTR pextrd, sse4
AVX_INSTR pextrq, sse4
AVX_INSTR pextrw, mmx2
AVX_INSTR phaddw, ssse3, 0, 0, 0
AVX_INSTR phaddd, ssse3, 0, 0, 0
AVX_INSTR phaddsw, ssse3, 0, 0, 0
AVX_INSTR phminposuw, sse4
AVX_INSTR phsubw, ssse3, 0, 0, 0
AVX_INSTR phsubd, ssse3, 0, 0, 0
AVX_INSTR phsubsw, ssse3, 0, 0, 0
AVX_INSTR pinsrb, sse4
AVX_INSTR pinsrd, sse4
AVX_INSTR pinsrq, sse4
AVX_INSTR pinsrw, mmx2
AVX_INSTR pmaddwd, mmx, 0, 0, 1
AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
AVX_INSTR pmaxsb, sse4, 0, 0, 1
AVX_INSTR pmaxsw, mmx2, 0, 0, 1
AVX_INSTR pmaxsd, sse4, 0, 0, 1
AVX_INSTR pmaxub, mmx2, 0, 0, 1
AVX_INSTR pmaxuw, sse4, 0, 0, 1
AVX_INSTR pmaxud, sse4, 0, 0, 1
AVX_INSTR pminsb, sse4, 0, 0, 1
AVX_INSTR pminsw, mmx2, 0, 0, 1
AVX_INSTR pminsd, sse4, 0, 0, 1
AVX_INSTR pminub, mmx2, 0, 0, 1
AVX_INSTR pminuw, sse4, 0, 0, 1
AVX_INSTR pminud, sse4, 0, 0, 1
AVX_INSTR pmovmskb, mmx2
AVX_INSTR pmovsxbw, sse4
AVX_INSTR pmovsxbd, sse4
AVX_INSTR pmovsxbq, sse4
AVX_INSTR pmovsxwd, sse4
AVX_INSTR pmovsxwq, sse4
AVX_INSTR pmovsxdq, sse4
AVX_INSTR pmovzxbw, sse4
AVX_INSTR pmovzxbd, sse4
AVX_INSTR pmovzxbq, sse4
AVX_INSTR pmovzxwd, sse4
AVX_INSTR pmovzxwq, sse4
AVX_INSTR pmovzxdq, sse4
AVX_INSTR pmuldq, sse4, 0, 0, 1
AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
AVX_INSTR pmulhuw, mmx2, 0, 0, 1
AVX_INSTR pmulhw, mmx, 0, 0, 1
AVX_INSTR pmullw, mmx, 0, 0, 1
AVX_INSTR pmulld, sse4, 0, 0, 1
AVX_INSTR pmuludq, sse2, 0, 0, 1
AVX_INSTR por, mmx, 0, 0, 1
AVX_INSTR psadbw, mmx2, 0, 0, 1
AVX_INSTR pshufb, ssse3, 0, 0, 0
AVX_INSTR pshufd, sse2
AVX_INSTR pshufhw, sse2
AVX_INSTR pshuflw, sse2
AVX_INSTR psignb, ssse3, 0, 0, 0
AVX_INSTR psignw, ssse3, 0, 0, 0
AVX_INSTR psignd, ssse3, 0, 0, 0
AVX_INSTR psllw, mmx, 0, 0, 0
AVX_INSTR pslld, mmx, 0, 0, 0
AVX_INSTR psllq, mmx, 0, 0, 0
AVX_INSTR pslldq, sse2, 0, 0, 0
AVX_INSTR psraw, mmx, 0, 0, 0
AVX_INSTR psrad, mmx, 0, 0, 0
AVX_INSTR psrlw, mmx, 0, 0, 0
AVX_INSTR psrld, mmx, 0, 0, 0
AVX_INSTR psrlq, mmx, 0, 0, 0
AVX_INSTR psrldq, sse2, 0, 0, 0
AVX_INSTR psubb, mmx, 0, 0, 0
AVX_INSTR psubw, mmx, 0, 0, 0
AVX_INSTR psubd, mmx, 0, 0, 0
AVX_INSTR psubq, sse2, 0, 0, 0
AVX_INSTR psubsb, mmx, 0, 0, 0
AVX_INSTR psubsw, mmx, 0, 0, 0
AVX_INSTR psubusb, mmx, 0, 0, 0
AVX_INSTR psubusw, mmx, 0, 0, 0
AVX_INSTR ptest, sse4
AVX_INSTR punpckhbw, mmx, 0, 0, 0
AVX_INSTR punpckhwd, mmx, 0, 0, 0
AVX_INSTR punpckhdq, mmx, 0, 0, 0
AVX_INSTR punpckhqdq, sse2, 0, 0, 0
AVX_INSTR punpcklbw, mmx, 0, 0, 0
AVX_INSTR punpcklwd, mmx, 0, 0, 0
AVX_INSTR punpckldq, mmx, 0, 0, 0
AVX_INSTR punpcklqdq, sse2, 0, 0, 0
AVX_INSTR pxor, mmx, 0, 0, 1
AVX_INSTR rcpps, sse, 1, 0, 0
AVX_INSTR rcpss, sse, 1, 0, 0
AVX_INSTR roundpd, sse4
AVX_INSTR roundps, sse4
AVX_INSTR roundsd, sse4
AVX_INSTR roundss, sse4
AVX_INSTR rsqrtps, sse, 1, 0, 0
AVX_INSTR rsqrtss, sse, 1, 0, 0
AVX_INSTR shufpd, sse2, 1, 1, 0
AVX_INSTR shufps, sse, 1, 1, 0
AVX_INSTR sqrtpd, sse2, 1, 0, 0
AVX_INSTR sqrtps, sse, 1, 0, 0
AVX_INSTR sqrtsd, sse2, 1, 0, 0
AVX_INSTR sqrtss, sse, 1, 0, 0
AVX_INSTR stmxcsr, sse
AVX_INSTR subpd, sse2, 1, 0, 0
AVX_INSTR subps, sse, 1, 0, 0
AVX_INSTR subsd, sse2, 1, 0, 0
AVX_INSTR subss, sse, 1, 0, 0
AVX_INSTR ucomisd, sse2
AVX_INSTR ucomiss, sse
AVX_INSTR unpckhpd, sse2, 1, 0, 0
AVX_INSTR unpckhps, sse, 1, 0, 0
AVX_INSTR unpcklpd, sse2, 1, 0, 0
AVX_INSTR unpcklps, sse, 1, 0, 0
AVX_INSTR xorpd, sse2, 1, 0, 1
AVX_INSTR xorps, sse, 1, 0, 1
; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
AVX_INSTR pfadd, 3dnow, 1, 0, 1
AVX_INSTR pfsub, 3dnow, 1, 0, 0
AVX_INSTR pfmul, 3dnow, 1, 0, 1
; base-4 constants for shuffles
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
CAT_XDEFINE q000, j, i
CAT_XDEFINE q00, j, i
CAT_XDEFINE q0, j, i
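; e.g. q0123 expands to the imm8 0x1b, so "pshufd m0, m1, q0123" (operands
; illustrative) reverses the four dwords: each base-4 digit is the source
; element index for destination elements 3..0, read left to right.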
%macro %1 4-7 %1, %2, %3
%error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
FMA_INSTR pmadcswd, pmaddwd, paddd
; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
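; e.g. "tzcnt eax, ecx" assembles here to the same bytes as "rep bsf eax, ecx";
; BMI-capable cpus execute it as tzcnt, while older cpus ignore the rep prefix
; and run plain bsf, which gives the same result for nonzero inputs.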
; convert FMA4 to FMA3 if possible
%macro %1 4-8 %1, %2, %3, %4
v%6 %1, %4, %3 ; %1 = %1 * %3 + %4
v%7 %1, %2, %4 ; %1 = %2 * %1 + %4
v%8 %1, %2, %3 ; %1 = %2 * %3 + %1
%error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported
FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd
FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps
FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd
FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss
FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd
FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps
FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd
FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps
FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd
FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps
FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd
FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss
FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd
FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps
FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd
FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss
FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd
FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps
FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd
FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss
; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug
%if ARCH_X86_64 == 0
%macro vpbroadcastq 2