;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2013 x264 project
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <henrik@gramner.com>
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org.
%ifndef private_prefix
%define private_prefix x264
%define public_prefix private_prefix

%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%elifidn __OUTPUT_FORMAT__,x64
%define mangle(x) _ %+ x
; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .rodata align=%1

%macro SECTION_TEXT 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .text align=%1
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.
; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
;      MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we would need a more flexible macro.
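
; For illustration, a complete (hypothetical) leaf function written with these
; macros. It assumes INIT_XMM sse2 is in effect, len is a positive multiple of
; mmsize, and both buffers are mmsize-aligned; the name is illustrative only:
;
; cglobal add_bytes, 3,3,1, dst, src, len
;     add   dstq, lenq      ; point both pointers at the end of the buffers
;     add   srcq, lenq
;     neg   lenq            ; count lenq up from -len towards zero
; .loop:
;     mova  m0, [srcq+lenq]
;     paddb m0, [dstq+lenq]
;     mova  [dstq+lenq], m0
;     add   lenq, mmsize
;     jl    .loop
;     RET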
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Use this instead of RET if it's a branch target.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
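
; For example (hypothetical instructions, default x86_64 argument mapping):
;     mov   r0d, r1d    ; 32-bit move between args 0 and 1
;     movzx r0d, r1b    ; zero-extend the low byte of arg 1
;     mov   r6, r6mp    ; reload arg 6 (passed on the stack) at native size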
%macro DECLARE_REG 2-3
%elif ARCH_X86_64 ; memory
%define r%1m [rstk + stack_offset + %3]
%define r%1mp qword r %+ %1 %+ m
%define r%1m [rstk + stack_offset + %3]
%define r%1mp dword r %+ %1 %+ m

%macro DECLARE_REG_SIZE 3

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null
; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
CAT_XDEFINE t, %%i, r%1

%macro DECLARE_REG_TMP_SIZE 0-*
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1h t%1 %+ h
%define t%1b t%1 %+ b

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
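
; For example, a hypothetical per-arch scratch allocation:
;     DECLARE_REG_TMP 2,0,1    ; t0=r2, t1=r0, t2=r1; t0d, t0w, etc. follow suit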
%assign stack_offset stack_offset+gprsize
%assign stack_offset stack_offset-gprsize

%macro PUSH_IF_USED 1-*

%macro POP_IF_USED 1-*

%macro LOAD_IF_USED 1-*
mov r%1, r %+ %1 %+ mp

%assign stack_offset stack_offset+(%2)
%assign stack_offset stack_offset-(%2)

%macro movsxdifnidn 2

%macro DEFINE_ARGS 0-*
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, h
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i

%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1h r %+ %%i %+ h
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1

%xdefine stack_offset %%stack_offset
%assign n_arg_names %0
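
; A hypothetical mid-function renaming, once the original argument names no
; longer describe what the registers hold:
;     DEFINE_ARGS dst, stride, tmp    ; now dstq==r0, strideq==r1, tmpq==r2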
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
%assign %%stack_alignment ((mmsize + 15) & ~15)
%assign stack_size %1
%assign stack_size -stack_size
%assign stack_size_padded stack_size
%assign stack_size_padded stack_size_padded + 32 ; reserve 32 bytes for shadow space
%assign xmm_regs_used %2
%if xmm_regs_used > 8
%assign stack_size_padded stack_size_padded + (xmm_regs_used-8)*16
%if mmsize <= 16 && HAVE_ALIGNED_STACK
%assign stack_size_padded stack_size_padded + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
SUB rsp, stack_size_padded
%assign %%reg_num (regs_used - 1)
%xdefine rstk r %+ %%reg_num
; align stack, and save original stack location directly above
; it, i.e. in [rsp+stack_size_padded], so we can restore the
; stack in a single instruction (i.e. mov rsp, rstk or mov
; rsp, [rsp+stack_size_padded])
%if %1 < 0 ; need to store rsp on stack
sub rsp, gprsize+stack_size_padded
and rsp, ~(%%stack_alignment-1)
%xdefine rstkm [rsp+stack_size_padded]
%else ; can keep rsp in rstk during whole function
sub rsp, stack_size_padded
and rsp, ~(%%stack_alignment-1)
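
; A hypothetical sketch of requesting 32 bytes of scratch space through the
; 4th cglobal parameter (which reaches ALLOC_STACK via PROLOGUE):
;     cglobal foo, 2,3,2, 32, dst, src
;         mova  [rsp], m0    ; [rsp+0..31] is ours to use
;         mova  m1, [rsp]
;         RET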
%macro SETUP_STACK_POINTER 1
%if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
%assign regs_used (regs_used + 1)
%elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
%warning "Stack pointer will overwrite register argument"

%macro DEFINE_ARGS_INTERNAL 3+

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
%if mmsize != 8 && stack_size == 0
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5
%macro WIN64_PUSH_XMM 0
; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
%if xmm_regs_used > 6
movaps [rstk + stack_offset + 8], xmm6
%if xmm_regs_used > 7
movaps [rstk + stack_offset + 24], xmm7
%if xmm_regs_used > 8
movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i

%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 8
%assign stack_size_padded (xmm_regs_used-8)*16 + (~stack_offset&8) + 32
SUB rsp, stack_size_padded

%macro WIN64_RESTORE_XMM_INTERNAL 1
%if xmm_regs_used > 8
%assign %%i xmm_regs_used
movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32]
%if stack_size_padded > 0
%if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
add %1, stack_size_padded
%assign %%pad_size stack_size_padded
%if xmm_regs_used > 7
movaps xmm7, [%1 + stack_offset - %%pad_size + 24]
%if xmm_regs_used > 6
movaps xmm6, [%1 + stack_offset - %%pad_size + 8]

%macro WIN64_RESTORE_XMM 1
WIN64_RESTORE_XMM_INTERNAL %1
%assign stack_offset (stack_offset-stack_size_padded)
%assign xmm_regs_used 0

%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0

WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0

%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
add rsp, stack_size_padded
POP_IF_USED 14, 13, 12, 11, 10, 9
%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28

%macro DECLARE_ARG 1-*
%define r%1m [rstk + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 7
PUSH_IF_USED 3, 4, 5, 6
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0

%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
add rsp, stack_size_padded
POP_IF_USED 6, 5, 4, 3

%endif ;======================================================================

%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1
%macro WIN64_PUSH_XMM 0
; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)

%define last_branch_adr $$
%macro AUTO_REP_RET 0
times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr
%elif notcpuflag(ssse3)
times ((last_branch_adr-$)>>31)+1 rep
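; (last_branch_adr-$ is zero when this macro immediately follows a branch
; instruction and negative otherwise, so the arithmetic shift by 31 yields
; 0 or -1 and the +1 turns that into "times 1" or "times 0" respectively.)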
%macro BRANCH_INSTR 0-*
%xdefine last_branch_adr %%branch_instr

BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp

%macro TAIL_CALL 2 ; callee, is_nonadjacent
;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 1, %1 %+ SUFFIX, %2

%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 0, %1 %+ SUFFIX, %2

%macro cglobal_internal 2-3+
%xdefine %%FUNCTION_PREFIX private_prefix
%xdefine %%VISIBILITY hidden
%xdefine %%FUNCTION_PREFIX public_prefix
%xdefine %%VISIBILITY
%xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
%xdefine %2.skip_prologue %2 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %2, 1
%xdefine current_function %2
%ifidn __OUTPUT_FORMAT__,elf
global %2:function %%VISIBILITY

RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
%xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
%assign stack_offset 0 ; stack pointer offset relative to the return address
%assign stack_size 0 ; amount of stack space that can be freely used inside a function
%assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
%assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1

; like cextern, but without the prefix
%macro cextern_naked 1
%xdefine %1 mangle(%1)
CAT_XDEFINE cglobaled_, %1, 1

%xdefine %1 mangle(private_prefix %+ _ %+ %1)
%ifidn __OUTPUT_FORMAT__,elf
global %1:data hidden

; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx
%assign cpuflags_avx2     (1<<14)| cpuflags_avx
%assign cpuflags_fma3     (1<<15)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_aligned  (1<<20) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<21)
%assign cpuflags_bmi1     (1<<22)|cpuflags_lzcnt
%assign cpuflags_bmi2     (1<<23)|cpuflags_bmi1

%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
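
; A hypothetical use inside a function body:
;     %if cpuflag(ssse3)
;         pshufb m0, m1
;     %else
;         pshufd m0, m0, q3120    ; illustrative non-SSSE3 fallback
;     %endif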
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-2
%assign cpuflags cpuflags_%1
%xdefine cpuname %1_%2
%assign cpuflags cpuflags | cpuflags_%2
%xdefine SUFFIX _ %+ cpuname
%assign avx_enabled 1
%if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
%define movnta movntps
; m# is a simd register of the currently selected size
; xm# is the corresponding xmmreg (if selected xmm or ymm size), or mmreg (if selected mmx)
; ym# is the corresponding ymmreg (if selected xmm or ymm size), or mmreg (if selected mmx)
; (All 3 remain in sync through SWAP.)
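
; For example (hypothetical): after INIT_YMM avx2, m0 is ymm0 and xm0 is xmm0;
; after INIT_XMM sse2, m0 is xmm0 and ym0 is ymm0; after INIT_MMX, m0, xm0 and
; ym0 all refer to mm0.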
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define movnta movntq
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nmm, %%i, %%i

%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define num_mmregs 16
%define movnta movntdq
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nxmm, %%i, %%i

; FIXME: INIT_AVX can be replaced by INIT_XMM avx
%assign avx_enabled 1
%define PALIGNR PALIGNR_SSSE3
%define RESET_MM_PERMUTATION INIT_AVX

%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define num_mmregs 16
%define movnta movntdq
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nymm, %%i, %%i

%macro DECLARE_MMCAST 1
%define xmmxmm%1 xmm%1
%define xmmymm%1 xmm%1
%define ymmxmm%1 ymm%1
%define ymmymm%1 ymm%1
%define xm%1 xmm %+ m%1
%define ym%1 ymm %+ m%1
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.

; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.

; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
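
; A hypothetical example: a butterfly whose results land in the "wrong"
; registers can end with
;     SWAP 0, 2    ; m0 and m2 trade names; no instruction is emitted
; and everything written after the SWAP sees the permuted naming.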
%macro PERMUTE 2-* ; takes a list of pairs to swap
CAT_XDEFINE n, m%1, %1

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
%ifnum %1 ; SWAP 0, 1, ...
SWAP_INTERNAL_NUM %1, %2
%else ; SWAP m0, m1, ...
SWAP_INTERNAL_NAME %1, %2

%macro SWAP_INTERNAL_NUM 2-*
CAT_XDEFINE n, m%1, %1
CAT_XDEFINE n, m%2, %2

%macro SWAP_INTERNAL_NAME 2-*
%xdefine %%args n %+ %1
%xdefine %%args %%args, n %+ %2
SWAP_INTERNAL_NUM %%args
; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
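
; A hypothetical sketch:
;     cglobal helper
;         SWAP 0, 2               ; the result now lives under the name m0
;         SAVE_MM_PERMUTATION     ; record helper's final register naming
;         ret
; A subsequent "call helper" (via the call macro below) reloads that
; permutation, so the caller's m0 refers to the value helper returned.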
%macro SAVE_MM_PERMUTATION 0-1
%xdefine %%f current_function %+ _m
CAT_XDEFINE %%f, %%i, m %+ %%i

%macro LOAD_MM_PERMUTATION 1 ; name to load from
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE n, m %+ %%i, %%i

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
call_internal %1 %+ SUFFIX, %1

%macro call_internal 2
LOAD_MM_PERMUTATION %%i
; Substitutions that reduce instruction size but are functionally equivalent

;=============================================================================
; AVX abstraction layer
;=============================================================================

CAT_XDEFINE sizeofmm, i, 8
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32

%macro CHECK_AVX_INSTR_EMU 3-*
%xdefine %%opcode %1
%error non-avx emulation of ``%%opcode'' is not supported

;%2 == 1 if float, 0 if int
;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro RUN_AVX_INSTR 5-8+
%assign %%sizeofreg sizeof%6
%assign %%sizeofreg sizeof%5
%assign %%sizeofreg mmsize
%assign %%emulate_avx 0
%if avx_enabled && %%sizeofreg >= 16
%xdefine %%instr v%1
%assign %%emulate_avx 1
CHECK_AVX_INSTR_EMU {%1 %5, %6, %7, %8}, %5, %7, %8
CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
; 3-operand AVX instructions with a memory arg can only have it in src2,
; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
; So, if the instruction is commutative with a memory arg, swap them.
%if %%sizeofreg == 8
%%instr %5, %6, %7, %8

;%2 == 1 if float, 0 if int
;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-4 0, 1, 0
%macro %1 1-9 fnord, fnord, fnord, fnord, %1, %2, %3, %4
RUN_AVX_INSTR %6, %7, %8, %9, %1
RUN_AVX_INSTR %6, %7, %8, %9, %1, %2
RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3
RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4
RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4, %5
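
; For example (hypothetical), given "AVX_INSTR paddw, 0, 0, 1" below,
;     paddw m0, m1, m2
; emits "vpaddw m0, m1, m2" when AVX is enabled, and under SSE emulation
; expands to the 2-operand pair
;     mova  m0, m1
;     paddw m0, m2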
; Instructions with both VEX and non-VEX encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR aesdec, 0, 0, 0
AVX_INSTR aesdeclast, 0, 0, 0
AVX_INSTR aesenc, 0, 0, 0
AVX_INSTR aesenclast, 0, 0, 0
AVX_INSTR aeskeygenassist
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 1, 0
AVX_INSTR cmpps, 1, 1, 0
AVX_INSTR cmpsd, 1, 1, 0
AVX_INSTR cmpss, 1, 1, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR insertps, 1, 1, 0
AVX_INSTR maskmovdqu
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movhpd, 1, 0, 0
AVX_INSTR movhps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movlpd, 1, 0, 0
AVX_INSTR movlps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pclmulqdq, 0, 1, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phminposuw
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pinsrb, 0, 1, 0
AVX_INSTR pinsrd, 0, 1, 0
AVX_INSTR pinsrq, 0, 1, 0
AVX_INSTR pinsrw, 0, 1, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR rcpps, 1, 0, 0
AVX_INSTR rcpss, 1, 0, 0
AVX_INSTR rsqrtps, 1, 0, 0
AVX_INSTR rsqrtss, 1, 0, 0
AVX_INSTR shufpd, 1, 1, 0
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR sqrtpd, 1, 0, 0
AVX_INSTR sqrtps, 1, 0, 0
AVX_INSTR sqrtsd, 1, 0, 0
AVX_INSTR sqrtss, 1, 0, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1
; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow!
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1
; base-4 constants for shuffles
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
CAT_XDEFINE q000, j, i
CAT_XDEFINE q00, j, i
CAT_XDEFINE q0, j, i
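
; For example, q3120 maps lanes 3,1,2,0 (listed high to low) to the immediate
; (3<<6)|(1<<4)|(2<<2)|0 = 0xD8, so "pshufd m0, m1, q3120" reads more easily
; than the raw constant.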
%macro %1 4-7 %1, %2, %3

FMA_INSTR pmacsdd, pmulld, paddd
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
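
; For example (hypothetical), "pmacsww m0, m1, m2, m3" uses the XOP instruction
; directly when available, and otherwise falls back to the pmullw + paddw pair
; declared above.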
; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
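; e.g. "tzcnt eax, edx" assembles to "rep bsf eax, edx"; CPUs without BMI1
; ignore the rep prefix and execute a plain bsf.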
; convert FMA4 to FMA3 if possible
%macro %1 4-8 %1, %2, %3, %4
v%6 %1, %4, %3 ; %1 = %1 * %3 + %4
v%7 %1, %2, %4 ; %1 = %2 * %1 + %4
v%8 %1, %2, %3 ; %1 = %2 * %3 + %1
%error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported

FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd
FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps
FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd
FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss

FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd
FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps
FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd
FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps

FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd
FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps
FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd
FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss

FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd
FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps
FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd
FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss

FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd
FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps
FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd
FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss
; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug
%if ARCH_X86_64 == 0
%macro vpbroadcastq 2