;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2018 x264 project
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Henrik Gramner <henrik@gramner.com>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Fiona Glaser <fiona@x264.com>
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%ifndef private_prefix
%define private_prefix x264
%define public_prefix private_prefix
%if HAVE_ALIGNED_STACK
%define STACK_ALIGNMENT 16
%ifndef STACK_ALIGNMENT
%define STACK_ALIGNMENT 16
%define STACK_ALIGNMENT 4
%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%elifidn __OUTPUT_FORMAT__,x64
%ifidn __OUTPUT_FORMAT__,elf
%elifidn __OUTPUT_FORMAT__,elf32
%elifidn __OUTPUT_FORMAT__,elf64
%define mangle(x) _ %+ x

; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
%elifidn __OUTPUT_FORMAT__,coff
%elifidn __OUTPUT_FORMAT__,win32
SECTION .rdata align=%1
SECTION .rdata align=%1
SECTION .rodata align=%1
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. The stack will be aligned before
;      allocating the specified stack size. If the required stack alignment is
;      larger than the known stack alignment the stack will be manually aligned
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,7,0x40, dst, src, tmp
; declares a function (foo) that automatically loads two arguments (dst and
; src) into registers, uses one additional register (tmp) plus 7 vector
; registers (m0-m6) and allocates 0x40 bytes of stack space.
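; A second, illustrative example using a negative stack size:
; cglobal bar, 3,3,8,-0x20, dst, src, len
; allocates 0x20 bytes of stack but, when manual alignment is needed, asks
; PROLOGUE to store the original stack pointer on the stack rather than
; dedicating an extra GPR to it.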
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Use this instead of RET if it's a branch target.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
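; e.g. (illustrative) for argument 0: r0 is the native-size register, r0d
; its low dword, r0w its low word, r0b its low byte, and r0m the original
; argument location, which on x86_32 is always a stack slot.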
%macro DECLARE_REG 2-3
%elif ARCH_X86_64 ; memory
%define r%1m [rstk + stack_offset + %3]
%define r%1mp qword r %+ %1 %+ m
%define r%1m [rstk + stack_offset + %3]
%define r%1mp dword r %+ %1 %+ m

%macro DECLARE_REG_SIZE 3
DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments
%macro DECLARE_REG_TMP 1-*
CAT_XDEFINE t, %%i, r%1
%macro DECLARE_REG_TMP_SIZE 0-*
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1h t%1 %+ h
%define t%1b t%1 %+ b
DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
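; Example (illustrative): "DECLARE_REG_TMP 2,0,1" makes t0 an alias of r2,
; t1 of r0 and t2 of r1, so shared code can be written against t# names even
; when the underlying per-arch register assignment differs.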
%assign stack_offset stack_offset+gprsize
%assign stack_offset stack_offset-gprsize
%macro PUSH_IF_USED 1-*
%macro POP_IF_USED 1-*
%macro LOAD_IF_USED 1-*
mov r%1, r %+ %1 %+ mp
%assign stack_offset stack_offset+(%2)
%assign stack_offset stack_offset-(%2)
%macro movsxdifnidn 2
%error assertion ``%1'' failed

%macro DEFINE_ARGS 0-*
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, h
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i
%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1h r %+ %%i %+ h
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1
%xdefine stack_offset %%stack_offset
%assign n_arg_names %0

%define required_stack_alignment ((mmsize + 15) & ~15)
%define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
%define high_mm_regs (16*cpuflag(avx512))
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
%assign stack_size %1
%assign stack_size -stack_size
%assign %%pad %%pad + 32 ; shadow space
%assign xmm_regs_used %2
%if xmm_regs_used > 8
%assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
%if required_stack_alignment <= STACK_ALIGNMENT
; maintain the current stack alignment
%assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded
%assign %%reg_num (regs_used - 1)
%xdefine rstk r %+ %%reg_num
; align stack, and save original stack location directly above
; it, i.e. in [rsp+stack_size_padded], so we can restore the
; stack in a single instruction (i.e. mov rsp, rstk or mov
; rsp, [rsp+stack_size_padded])
%if %1 < 0 ; need to store rsp on stack
%xdefine rstkm [rsp + stack_size + %%pad]
%assign %%pad %%pad + gprsize
%else ; can keep rsp in rstk during whole function
%assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
and rsp, ~(required_stack_alignment-1)
sub rsp, stack_size_padded
movifnidn rstkm, rstk
%macro SETUP_STACK_POINTER 1
%if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
; Reserve an additional register for storing the original stack pointer, but avoid using
; eax/rax for this purpose since it can potentially get overwritten as a return value.
%assign regs_used (regs_used + 1)
%if ARCH_X86_64 && regs_used == 7
%elif ARCH_X86_64 == 0 && regs_used == 1
%if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
%assign regs_used 5 + UNIX64 * 3

%macro DEFINE_ARGS_INTERNAL 3+
%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R14, 96
DECLARE_REG 12, R15, 104
DECLARE_REG 13, R12, 112
DECLARE_REG 14, R13, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
%if mmsize != 8 && stack_size == 0
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5
%macro WIN64_PUSH_XMM 0
; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
%if xmm_regs_used > 6 + high_mm_regs
movaps [rstk + stack_offset + 8], xmm6
%if xmm_regs_used > 7 + high_mm_regs
movaps [rstk + stack_offset + 24], xmm7
%assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
%if %%xmm_regs_on_stack > 0
%rep %%xmm_regs_on_stack
movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i

%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16 + high_mm_regs
%assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
%if %%xmm_regs_on_stack > 0
; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
%assign %%pad %%xmm_regs_on_stack*16 + 32
%assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
SUB rsp, stack_size_padded

%macro WIN64_RESTORE_XMM_INTERNAL 0
%assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
%if %%xmm_regs_on_stack > 0
%assign %%i xmm_regs_used - high_mm_regs
%rep %%xmm_regs_on_stack
movaps xmm %+ %%i, [rsp + (%%i-8)*16 + stack_size + 32]
%if stack_size_padded > 0
%if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
add rsp, stack_size_padded
%assign %%pad_size stack_size_padded
%if xmm_regs_used > 7 + high_mm_regs
movaps xmm7, [rsp + stack_offset - %%pad_size + 24]
%if xmm_regs_used > 6 + high_mm_regs
movaps xmm6, [rsp + stack_offset - %%pad_size + 8]

%macro WIN64_RESTORE_XMM 0
WIN64_RESTORE_XMM_INTERNAL
%assign stack_offset (stack_offset-stack_size_padded)
%assign stack_size_padded 0
%assign xmm_regs_used 0

%define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs

WIN64_RESTORE_XMM_INTERNAL
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
%if vzeroupper_required
%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R14, 48
DECLARE_REG 12, R15, 56
DECLARE_REG 13, R12, 64
DECLARE_REG 14, R13, 72

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
%assign xmm_regs_used %3
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required

%if stack_size_padded > 0
%if required_stack_alignment > STACK_ALIGNMENT
add rsp, stack_size_padded
POP_IF_USED 14, 13, 12, 11, 10, 9
%if vzeroupper_required
%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28

%macro DECLARE_ARG 1-*
%define r%1m [rstk + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 7
PUSH_IF_USED 3, 4, 5, 6
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required

%if stack_size_padded > 0
%if required_stack_alignment > STACK_ALIGNMENT
add rsp, stack_size_padded
POP_IF_USED 6, 5, 4, 3
%if vzeroupper_required

%endif ;======================================================================
%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 0
%macro WIN64_PUSH_XMM 0

; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
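; The 2-byte form is "rep ret" (f3 c3): the rep prefix is ignored at
; execution but avoids the slow single-byte ret path on the affected AMD cpus.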
%if has_epilogue || cpuflag(ssse3)
annotate_function_size

%define last_branch_adr $$
%macro AUTO_REP_RET 0
%if notcpuflag(ssse3)
times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
annotate_function_size

%macro BRANCH_INSTR 0-*
%if notcpuflag(ssse3)
%xdefine last_branch_adr %%branch_instr
BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp

%macro TAIL_CALL 2 ; callee, is_nonadjacent
annotate_function_size
;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
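; Example (illustrative): with private_prefix x264 and SUFFIX _sse2,
; "cglobal sad_16x16, 3,3,8" emits a global symbol named x264_sad_16x16_sse2
; (hidden visibility on ELF, plus any output-format mangling such as a
; leading underscore).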
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 1, %1 %+ SUFFIX, %2
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 0, %1 %+ SUFFIX, %2
%macro cglobal_internal 2-3+
annotate_function_size
%xdefine %%FUNCTION_PREFIX private_prefix
%xdefine %%VISIBILITY hidden
%xdefine %%FUNCTION_PREFIX public_prefix
%xdefine %%VISIBILITY
%xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
%xdefine %2.skip_prologue %2 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %2, 1
%xdefine current_function %2
%xdefine current_function_section __SECT__
global %2:function %%VISIBILITY
RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
%xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
%assign stack_offset 0 ; stack pointer offset relative to the return address
%assign stack_size 0 ; amount of stack space that can be freely used inside a function
%assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
%assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 and vzeroupper
; Create a global symbol from a local label with the correct name mangling and type
%macro cglobal_label 1
global current_function %+ %1:function hidden
global current_function %+ %1

%xdefine %1 mangle(private_prefix %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1

; like cextern, but without the prefix
%macro cextern_naked 1
%xdefine %1 mangle(%1)
CAT_XDEFINE cglobaled_, %1, 1

%xdefine %1 mangle(private_prefix %+ _ %+ %1)
global %1:data hidden

; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
[SECTION .note.GNU-stack noalloc noexec nowrite progbits]

; Tell debuggers how large the function was.
; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
; then its size might be unspecified.
%macro annotate_function_size 0
%ifdef current_function
current_function_section
size current_function %%ecf - current_function
%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_lzcnt    (1<<7) | cpuflags_sse2
%assign cpuflags_sse3     (1<<8) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<9) | cpuflags_sse3
%assign cpuflags_sse4     (1<<10)| cpuflags_ssse3
%assign cpuflags_sse42    (1<<11)| cpuflags_sse4
%assign cpuflags_aesni    (1<<12)| cpuflags_sse42
%assign cpuflags_avx      (1<<13)| cpuflags_sse42
%assign cpuflags_xop      (1<<14)| cpuflags_avx
%assign cpuflags_fma4     (1<<15)| cpuflags_avx
%assign cpuflags_fma3     (1<<16)| cpuflags_avx
%assign cpuflags_bmi1     (1<<17)| cpuflags_avx|cpuflags_lzcnt
%assign cpuflags_bmi2     (1<<18)| cpuflags_bmi1
%assign cpuflags_avx2     (1<<19)| cpuflags_fma3|cpuflags_bmi2
%assign cpuflags_avx512   (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL

%assign cpuflags_cache32  (1<<21)
%assign cpuflags_cache64  (1<<22)
%assign cpuflags_aligned  (1<<23) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<24)

; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
%define    cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
%define notcpuflag(x) (cpuflag(x) ^ 1)
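; e.g. with cpuflags == cpuflags_sse2, cpuflag(sse) evaluates to 1 (sse2
; implies sse, so all the sse bits are set) while cpuflag(avx) evaluates to 0.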
; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-*
%xdefine cpuname cpuname %+ _%1
%assign cpuflags cpuflags | cpuflags_%1
%xdefine SUFFIX _ %+ cpuname
%assign avx_enabled 1
%if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
%define movnta movntps
%elif cpuflag(sse3) && notcpuflag(ssse3)
%if ARCH_X86_64 || cpuflag(sse2)

; Merge mmx, sse*, and avx*
; m# is a simd register of the currently selected size
; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
; zm# is the corresponding zmm register if mmsize >= 64, otherwise the same as m#
; (All 4 remain in sync through SWAP.)
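; Example (illustrative): after "INIT_YMM avx2", mmsize is 32, so
; "mova m0, [r0]" assembles as "vmovdqa ymm0, [r0]", while xm0 still names
; xmm0 (the low half of m0) for 128-bit operations.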
%macro DEFINE_MMREGS 1 ; mmtype
%assign %%prev_mmregs 0
%assign %%prev_mmregs num_mmregs
%if ARCH_X86_64 && mmsize >= 16
%assign num_mmregs 16
%if cpuflag(avx512) || mmsize == 64
%assign num_mmregs 32
CAT_XDEFINE m, %%i, %1 %+ %%i
CAT_XDEFINE nn%1, %%i, %%i
%if %%prev_mmregs > num_mmregs
%rep %%prev_mmregs - num_mmregs
CAT_UNDEF nn %+ mmtype, %%i

; Prefer registers 16-31 over 0-15 to avoid having to use vzeroupper
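; (Legacy SSE instructions can only access xmm0-15, so code that dirties only
; registers 16-31 never enters the mixed AVX/SSE state that vzeroupper exists
; to clean up.)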
%macro AVX512_MM_PERMUTATION 0-1 0 ; start_reg
%if ARCH_X86_64 && cpuflag(avx512)
%assign %%i_high %%i+16

%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define movnta movntq

%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define movnta movntdq
AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers

%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define movnta movntdq
AVX512_MM_PERMUTATION

%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_ZMM %1
%define movnta movntdq
AVX512_MM_PERMUTATION

%macro DECLARE_MMCAST 1
%define xmmxmm%1 xmm%1
%define xmmymm%1 xmm%1
%define xmmzmm%1 xmm%1
%define ymmmm%1  mm%1
%define ymmxmm%1 xmm%1
%define ymmymm%1 ymm%1
%define ymmzmm%1 ymm%1
%define zmmmm%1  mm%1
%define zmmxmm%1 xmm%1
%define zmmymm%1 ymm%1
%define zmmzmm%1 zmm%1
%define xm%1 xmm %+ m%1
%define ym%1 ymm %+ m%1
%define zm%1 zmm %+ m%1
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.

; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.

; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
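; Example (illustrative): "SWAP 0, 1" exchanges the meanings of the names m0
; and m1 at assembly time; code after the SWAP that uses m0 refers to the
; register previously called m1, and no instructions are emitted.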
%macro PERMUTE 2-* ; takes a list of pairs to swap
%xdefine %%tmp%2 m%2
%xdefine m%1 %%tmp%2
CAT_XDEFINE nn, m%1, %1

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
%ifnum %1 ; SWAP 0, 1, ...
SWAP_INTERNAL_NUM %1, %2
%else ; SWAP m0, m1, ...
SWAP_INTERNAL_NAME %1, %2

%macro SWAP_INTERNAL_NUM 2-*
CAT_XDEFINE nn, m%1, %1
CAT_XDEFINE nn, m%2, %2

%macro SWAP_INTERNAL_NAME 2-*
%xdefine %%args nn %+ %1
%xdefine %%args %%args, nn %+ %2
SWAP_INTERNAL_NUM %%args

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
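; Example (illustrative): end a helper that leaves its results in permuted
; mmregs with SAVE_MM_PERMUTATION; a later "call helper" (via the call macro
; below) then reloads that permutation automatically at the call site.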
%macro SAVE_MM_PERMUTATION 0-1
%xdefine %%f current_function %+ _m
CAT_XDEFINE %%f, %%i, m %+ %%i

%macro LOAD_MM_PERMUTATION 1 ; name to load from
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE nn, m %+ %%i, %%i

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
call_internal %1 %+ SUFFIX, %1
%macro call_internal 2
%ifndef cglobaled_%2
LOAD_MM_PERMUTATION %%i

; Substitutions that reduce instruction size but are functionally equivalent
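; For instance, "add r0, 128" can be encoded one byte shorter as
; "sub r0, -128", because -128 fits in a sign-extended 8-bit immediate
; while +128 does not.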
;=============================================================================
; AVX abstraction layer
;=============================================================================

CAT_XDEFINE sizeofmm, i, 8
CAT_XDEFINE regnumofmm, i, i
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32
CAT_XDEFINE sizeofzmm, i, 64
CAT_XDEFINE regnumofxmm, i, i
CAT_XDEFINE regnumofymm, i, i
CAT_XDEFINE regnumofzmm, i, i

%macro CHECK_AVX_INSTR_EMU 3-*
%xdefine %%opcode %1
%error non-avx emulation of ``%%opcode'' is not supported

;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro RUN_AVX_INSTR 6-9+
%assign __sizeofreg sizeof%7
%assign __sizeofreg sizeof%6
%assign __sizeofreg mmsize
%assign __emulate_avx 0
%if avx_enabled && __sizeofreg >= 16
%xdefine __instr v%1
%assign __emulate_avx 1
%error use of ``%1'' %2 instruction in cpuname function: current_function
%elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
%error use of ``%1'' sse2 instruction in cpuname function: current_function
; 3-operand AVX instructions with a memory arg can only have it in src2,
; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
; So, if the instruction is commutative with a memory arg, swap them.
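; Example (illustrative): emulating "paddw m0, m1, [r2]" swaps the sources so
; the memory operand goes into the mov, i.e. "mova m0, [r2]" followed by
; "paddw m0, m1", which is only valid because paddw is commutative.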
CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
%if __sizeofreg == 8
__instr %6, %7, %8, %9

;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-5 fnord, 0, 255, 0
%macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
; Instructions with both VEX/EVEX and legacy encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, sse2, 1, 0, 1
AVX_INSTR addps, sse, 1, 0, 1
AVX_INSTR addsd, sse2, 1, 0, 0
AVX_INSTR addss, sse, 1, 0, 0
AVX_INSTR addsubpd, sse3, 1, 0, 0
AVX_INSTR addsubps, sse3, 1, 0, 0
AVX_INSTR aesdec, aesni, 0, 0, 0
AVX_INSTR aesdeclast, aesni, 0, 0, 0
AVX_INSTR aesenc, aesni, 0, 0, 0
AVX_INSTR aesenclast, aesni, 0, 0, 0
AVX_INSTR aesimc, aesni
AVX_INSTR aeskeygenassist, aesni
AVX_INSTR andnpd, sse2, 1, 0, 0
AVX_INSTR andnps, sse, 1, 0, 0
AVX_INSTR andpd, sse2, 1, 0, 1
AVX_INSTR andps, sse, 1, 0, 1
AVX_INSTR blendpd, sse4, 1, 1, 0
AVX_INSTR blendps, sse4, 1, 1, 0
AVX_INSTR blendvpd, sse4 ; can't be emulated
AVX_INSTR blendvps, sse4 ; can't be emulated
AVX_INSTR cmpeqpd, sse2, 1, 0, 1
AVX_INSTR cmpeqps, sse, 1, 0, 1
AVX_INSTR cmpeqsd, sse2, 1, 0, 0
AVX_INSTR cmpeqss, sse, 1, 0, 0
AVX_INSTR cmplepd, sse2, 1, 0, 0
AVX_INSTR cmpleps, sse, 1, 0, 0
AVX_INSTR cmplesd, sse2, 1, 0, 0
AVX_INSTR cmpless, sse, 1, 0, 0
AVX_INSTR cmpltpd, sse2, 1, 0, 0
AVX_INSTR cmpltps, sse, 1, 0, 0
AVX_INSTR cmpltsd, sse2, 1, 0, 0
AVX_INSTR cmpltss, sse, 1, 0, 0
AVX_INSTR cmpneqpd, sse2, 1, 0, 1
AVX_INSTR cmpneqps, sse, 1, 0, 1
AVX_INSTR cmpneqsd, sse2, 1, 0, 0
AVX_INSTR cmpneqss, sse, 1, 0, 0
AVX_INSTR cmpnlepd, sse2, 1, 0, 0
AVX_INSTR cmpnleps, sse, 1, 0, 0
AVX_INSTR cmpnlesd, sse2, 1, 0, 0
AVX_INSTR cmpnless, sse, 1, 0, 0
AVX_INSTR cmpnltpd, sse2, 1, 0, 0
AVX_INSTR cmpnltps, sse, 1, 0, 0
AVX_INSTR cmpnltsd, sse2, 1, 0, 0
AVX_INSTR cmpnltss, sse, 1, 0, 0
AVX_INSTR cmpordpd, sse2, 1, 0, 1
AVX_INSTR cmpordps, sse, 1, 0, 1
AVX_INSTR cmpordsd, sse2, 1, 0, 0
AVX_INSTR cmpordss, sse, 1, 0, 0
AVX_INSTR cmppd, sse2, 1, 1, 0
AVX_INSTR cmpps, sse, 1, 1, 0
AVX_INSTR cmpsd, sse2, 1, 1, 0
AVX_INSTR cmpss, sse, 1, 1, 0
AVX_INSTR cmpunordpd, sse2, 1, 0, 1
AVX_INSTR cmpunordps, sse, 1, 0, 1
AVX_INSTR cmpunordsd, sse2, 1, 0, 0
AVX_INSTR cmpunordss, sse, 1, 0, 0
AVX_INSTR comisd, sse2
AVX_INSTR comiss, sse
AVX_INSTR cvtdq2pd, sse2
AVX_INSTR cvtdq2ps, sse2
AVX_INSTR cvtpd2dq, sse2
AVX_INSTR cvtpd2ps, sse2
AVX_INSTR cvtps2dq, sse2
AVX_INSTR cvtps2pd, sse2
AVX_INSTR cvtsd2si, sse2
AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
AVX_INSTR cvtsi2ss, sse, 1, 0, 0
AVX_INSTR cvtss2sd, sse2, 1, 0, 0
AVX_INSTR cvtss2si, sse
AVX_INSTR cvttpd2dq, sse2
AVX_INSTR cvttps2dq, sse2
AVX_INSTR cvttsd2si, sse2
AVX_INSTR cvttss2si, sse
AVX_INSTR divpd, sse2, 1, 0, 0
AVX_INSTR divps, sse, 1, 0, 0
AVX_INSTR divsd, sse2, 1, 0, 0
AVX_INSTR divss, sse, 1, 0, 0
AVX_INSTR dppd, sse4, 1, 1, 0
AVX_INSTR dpps, sse4, 1, 1, 0
AVX_INSTR extractps, sse4
AVX_INSTR haddpd, sse3, 1, 0, 0
AVX_INSTR haddps, sse3, 1, 0, 0
AVX_INSTR hsubpd, sse3, 1, 0, 0
AVX_INSTR hsubps, sse3, 1, 0, 0
AVX_INSTR insertps, sse4, 1, 1, 0
AVX_INSTR lddqu, sse3
AVX_INSTR ldmxcsr, sse
AVX_INSTR maskmovdqu, sse2
AVX_INSTR maxpd, sse2, 1, 0, 1
AVX_INSTR maxps, sse, 1, 0, 1
AVX_INSTR maxsd, sse2, 1, 0, 0
AVX_INSTR maxss, sse, 1, 0, 0
AVX_INSTR minpd, sse2, 1, 0, 1
AVX_INSTR minps, sse, 1, 0, 1
AVX_INSTR minsd, sse2, 1, 0, 0
AVX_INSTR minss, sse, 1, 0, 0
AVX_INSTR movapd, sse2
AVX_INSTR movaps, sse
AVX_INSTR movddup, sse3
AVX_INSTR movdqa, sse2
AVX_INSTR movdqu, sse2
AVX_INSTR movhlps, sse, 1, 0, 0
AVX_INSTR movhpd, sse2, 1, 0, 0
AVX_INSTR movhps, sse, 1, 0, 0
AVX_INSTR movlhps, sse, 1, 0, 0
AVX_INSTR movlpd, sse2, 1, 0, 0
AVX_INSTR movlps, sse, 1, 0, 0
AVX_INSTR movmskpd, sse2
AVX_INSTR movmskps, sse
AVX_INSTR movntdq, sse2
AVX_INSTR movntdqa, sse4
AVX_INSTR movntpd, sse2
AVX_INSTR movntps, sse
AVX_INSTR movsd, sse2, 1, 0, 0
AVX_INSTR movshdup, sse3
AVX_INSTR movsldup, sse3
AVX_INSTR movss, sse, 1, 0, 0
AVX_INSTR movupd, sse2
AVX_INSTR movups, sse
AVX_INSTR mpsadbw, sse4, 0, 1, 0
AVX_INSTR mulpd, sse2, 1, 0, 1
AVX_INSTR mulps, sse, 1, 0, 1
AVX_INSTR mulsd, sse2, 1, 0, 0
AVX_INSTR mulss, sse, 1, 0, 0
AVX_INSTR orpd, sse2, 1, 0, 1
AVX_INSTR orps, sse, 1, 0, 1
AVX_INSTR pabsb, ssse3
AVX_INSTR pabsd, ssse3
AVX_INSTR pabsw, ssse3
AVX_INSTR packsswb, mmx, 0, 0, 0
AVX_INSTR packssdw, mmx, 0, 0, 0
AVX_INSTR packuswb, mmx, 0, 0, 0
AVX_INSTR packusdw, sse4, 0, 0, 0
AVX_INSTR paddb, mmx, 0, 0, 1
AVX_INSTR paddw, mmx, 0, 0, 1
AVX_INSTR paddd, mmx, 0, 0, 1
AVX_INSTR paddq, sse2, 0, 0, 1
AVX_INSTR paddsb, mmx, 0, 0, 1
AVX_INSTR paddsw, mmx, 0, 0, 1
AVX_INSTR paddusb, mmx, 0, 0, 1
AVX_INSTR paddusw, mmx, 0, 0, 1
AVX_INSTR palignr, ssse3, 0, 1, 0
AVX_INSTR pand, mmx, 0, 0, 1
AVX_INSTR pandn, mmx, 0, 0, 0
AVX_INSTR pavgb, mmx2, 0, 0, 1
AVX_INSTR pavgw, mmx2, 0, 0, 1
AVX_INSTR pblendvb, sse4 ; can't be emulated
AVX_INSTR pblendw, sse4, 0, 1, 0
AVX_INSTR pclmulqdq, fnord, 0, 1, 0
AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
AVX_INSTR pcmpestri, sse42
AVX_INSTR pcmpestrm, sse42
AVX_INSTR pcmpistri, sse42
AVX_INSTR pcmpistrm, sse42
AVX_INSTR pcmpeqb, mmx, 0, 0, 1
AVX_INSTR pcmpeqw, mmx, 0, 0, 1
AVX_INSTR pcmpeqd, mmx, 0, 0, 1
AVX_INSTR pcmpeqq, sse4, 0, 0, 1
AVX_INSTR pcmpgtb, mmx, 0, 0, 0
AVX_INSTR pcmpgtw, mmx, 0, 0, 0
AVX_INSTR pcmpgtd, mmx, 0, 0, 0
AVX_INSTR pcmpgtq, sse42, 0, 0, 0
AVX_INSTR pextrb, sse4
AVX_INSTR pextrd, sse4
AVX_INSTR pextrq, sse4
AVX_INSTR pextrw, mmx2
AVX_INSTR phaddw, ssse3, 0, 0, 0
AVX_INSTR phaddd, ssse3, 0, 0, 0
AVX_INSTR phaddsw, ssse3, 0, 0, 0
AVX_INSTR phminposuw, sse4
AVX_INSTR phsubw, ssse3, 0, 0, 0
AVX_INSTR phsubd, ssse3, 0, 0, 0
AVX_INSTR phsubsw, ssse3, 0, 0, 0
AVX_INSTR pinsrb, sse4, 0, 1, 0
AVX_INSTR pinsrd, sse4, 0, 1, 0
AVX_INSTR pinsrq, sse4, 0, 1, 0
AVX_INSTR pinsrw, mmx2, 0, 1, 0
AVX_INSTR pmaddwd, mmx, 0, 0, 1
AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
AVX_INSTR pmaxsb, sse4, 0, 0, 1
AVX_INSTR pmaxsw, mmx2, 0, 0, 1
AVX_INSTR pmaxsd, sse4, 0, 0, 1
AVX_INSTR pmaxub, mmx2, 0, 0, 1
AVX_INSTR pmaxuw, sse4, 0, 0, 1
AVX_INSTR pmaxud, sse4, 0, 0, 1
AVX_INSTR pminsb, sse4, 0, 0, 1
AVX_INSTR pminsw, mmx2, 0, 0, 1
AVX_INSTR pminsd, sse4, 0, 0, 1
AVX_INSTR pminub, mmx2, 0, 0, 1
AVX_INSTR pminuw, sse4, 0, 0, 1
AVX_INSTR pminud, sse4, 0, 0, 1
AVX_INSTR pmovmskb, mmx2
AVX_INSTR pmovsxbw, sse4
AVX_INSTR pmovsxbd, sse4
AVX_INSTR pmovsxbq, sse4
AVX_INSTR pmovsxwd, sse4
AVX_INSTR pmovsxwq, sse4
AVX_INSTR pmovsxdq, sse4
AVX_INSTR pmovzxbw, sse4
AVX_INSTR pmovzxbd, sse4
AVX_INSTR pmovzxbq, sse4
AVX_INSTR pmovzxwd, sse4
AVX_INSTR pmovzxwq, sse4
AVX_INSTR pmovzxdq, sse4
AVX_INSTR pmuldq, sse4, 0, 0, 1
AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
AVX_INSTR pmulhuw, mmx2, 0, 0, 1
AVX_INSTR pmulhw, mmx, 0, 0, 1
AVX_INSTR pmullw, mmx, 0, 0, 1
AVX_INSTR pmulld, sse4, 0, 0, 1
AVX_INSTR pmuludq, sse2, 0, 0, 1
AVX_INSTR por, mmx, 0, 0, 1
AVX_INSTR psadbw, mmx2, 0, 0, 1
AVX_INSTR pshufb, ssse3, 0, 0, 0
AVX_INSTR pshufd, sse2
AVX_INSTR pshufhw, sse2
AVX_INSTR pshuflw, sse2
AVX_INSTR psignb, ssse3, 0, 0, 0
AVX_INSTR psignw, ssse3, 0, 0, 0
AVX_INSTR psignd, ssse3, 0, 0, 0
AVX_INSTR psllw, mmx, 0, 0, 0
AVX_INSTR pslld, mmx, 0, 0, 0
AVX_INSTR psllq, mmx, 0, 0, 0
AVX_INSTR pslldq, sse2, 0, 0, 0
AVX_INSTR psraw, mmx, 0, 0, 0
AVX_INSTR psrad, mmx, 0, 0, 0
AVX_INSTR psrlw, mmx, 0, 0, 0
AVX_INSTR psrld, mmx, 0, 0, 0
AVX_INSTR psrlq, mmx, 0, 0, 0
AVX_INSTR psrldq, sse2, 0, 0, 0
AVX_INSTR psubb, mmx, 0, 0, 0
AVX_INSTR psubw, mmx, 0, 0, 0
AVX_INSTR psubd, mmx, 0, 0, 0
AVX_INSTR psubq, sse2, 0, 0, 0
AVX_INSTR psubsb, mmx, 0, 0, 0
AVX_INSTR psubsw, mmx, 0, 0, 0
AVX_INSTR psubusb, mmx, 0, 0, 0
AVX_INSTR psubusw, mmx, 0, 0, 0
AVX_INSTR ptest, sse4
AVX_INSTR punpckhbw, mmx, 0, 0, 0
AVX_INSTR punpckhwd, mmx, 0, 0, 0
AVX_INSTR punpckhdq, mmx, 0, 0, 0
AVX_INSTR punpckhqdq, sse2, 0, 0, 0
AVX_INSTR punpcklbw, mmx, 0, 0, 0
AVX_INSTR punpcklwd, mmx, 0, 0, 0
AVX_INSTR punpckldq, mmx, 0, 0, 0
AVX_INSTR punpcklqdq, sse2, 0, 0, 0
AVX_INSTR pxor, mmx, 0, 0, 1
AVX_INSTR rcpps, sse
AVX_INSTR rcpss, sse, 1, 0, 0
AVX_INSTR roundpd, sse4
AVX_INSTR roundps, sse4
AVX_INSTR roundsd, sse4, 1, 1, 0
AVX_INSTR roundss, sse4, 1, 1, 0
AVX_INSTR rsqrtps, sse
AVX_INSTR rsqrtss, sse, 1, 0, 0
AVX_INSTR shufpd, sse2, 1, 1, 0
AVX_INSTR shufps, sse, 1, 1, 0
AVX_INSTR sqrtpd, sse2
AVX_INSTR sqrtps, sse
AVX_INSTR sqrtsd, sse2, 1, 0, 0
AVX_INSTR sqrtss, sse, 1, 0, 0
AVX_INSTR stmxcsr, sse
AVX_INSTR subpd, sse2, 1, 0, 0
AVX_INSTR subps, sse, 1, 0, 0
AVX_INSTR subsd, sse2, 1, 0, 0
AVX_INSTR subss, sse, 1, 0, 0
AVX_INSTR ucomisd, sse2
AVX_INSTR ucomiss, sse
AVX_INSTR unpckhpd, sse2, 1, 0, 0
AVX_INSTR unpckhps, sse, 1, 0, 0
AVX_INSTR unpcklpd, sse2, 1, 0, 0
AVX_INSTR unpcklps, sse, 1, 0, 0
AVX_INSTR xorpd, sse2, 1, 0, 1
AVX_INSTR xorps, sse, 1, 0, 1
; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
AVX_INSTR pfadd, 3dnow, 1, 0, 1
AVX_INSTR pfsub, 3dnow, 1, 0, 0
AVX_INSTR pfmul, 3dnow, 1, 0, 1

; base-4 constants for shuffles
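; e.g. q3120 expands to the imm8 0xd8 (base-4 digits 3,1,2,0), so
; "pshufd m0, m1, q3120" selects source dwords 0,2,1,3 from low to high.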
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
CAT_XDEFINE q000, j, i
CAT_XDEFINE q00, j, i
CAT_XDEFINE q0, j, i

%macro %1 4-7 %1, %2, %3
%error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
FMA_INSTR pmadcswd, pmaddwd, paddd

; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
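; On cpus without BMI1 the f3 prefix is simply ignored, and the instruction
; executes as a plain bsf, which yields the same result for any nonzero input.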
; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
; FMA3 is only possible if dst is the same as one of the src registers.
; Either src2 or src3 can be a memory operand.
%macro FMA4_INSTR 2-*
%xdefine %$prefix %1
%macro %$prefix%2 4-6 %$prefix, %2
%if notcpuflag(fma3) && notcpuflag(fma4)
%error use of ``%5%6'' fma instruction in cpuname function: current_function
v%5%6 %1, %2, %3, %4
; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
v%{5}213%6 %2, %3, %4
v%{5}132%6 %2, %4, %3
v%{5}213%6 %3, %2, %4
v%{5}231%6 %4, %2, %3
%error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported

FMA4_INSTR fmadd, pd, ps, sd, ss
FMA4_INSTR fmaddsub, pd, ps
FMA4_INSTR fmsub, pd, ps, sd, ss
FMA4_INSTR fmsubadd, pd, ps
FMA4_INSTR fnmadd, pd, ps, sd, ss
FMA4_INSTR fnmsub, pd, ps, sd, ss
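; Example (illustrative): "fmaddps m0, m0, m1, m2" computes m0 = m0*m1 + m2,
; assembling to vfmadd213ps on FMA3 cpus and to 4-operand vfmaddps on FMA4.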
; Macros for converting VEX instructions to equivalent EVEX ones.
%macro EVEX_INSTR 2-3 0 ; vex, evex, prefer_evex
%macro %1 2-7 fnord, fnord, %1, %2, %3
%define %%args %1, %2
%define %%args %1, %2, %3
%define %%args %1, %2, %3, %4
%assign %%evex_required cpuflag(avx512) & %7
%if regnumof%1 >= 16 || sizeof%1 > 32
%assign %%evex_required 1
%if regnumof%2 >= 16 || sizeof%2 > 32
%assign %%evex_required 1
%5 %%args ; Prefer VEX over EVEX due to shorter instruction length

EVEX_INSTR vbroadcastf128, vbroadcastf32x4
EVEX_INSTR vbroadcasti128, vbroadcasti32x4
EVEX_INSTR vextractf128, vextractf32x4
EVEX_INSTR vextracti128, vextracti32x4
EVEX_INSTR vinsertf128, vinsertf32x4
EVEX_INSTR vinserti128, vinserti32x4
EVEX_INSTR vmovdqa, vmovdqa32
EVEX_INSTR vmovdqu, vmovdqu32
EVEX_INSTR vpand, vpandd
EVEX_INSTR vpandn, vpandnd
EVEX_INSTR vpor, vpord
EVEX_INSTR vpxor, vpxord
EVEX_INSTR vrcpps, vrcp14ps, 1 ; EVEX versions have higher precision
EVEX_INSTR vrcpss, vrcp14ss, 1
EVEX_INSTR vrsqrtps, vrsqrt14ps, 1
EVEX_INSTR vrsqrtss, vrsqrt14ss, 1
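; Example (illustrative): with avx512 enabled, "vmovdqa m0, [r0]" on a zmm
; register (or any register numbered 16 or higher) is automatically emitted
; as the EVEX-encoded vmovdqa32, while smaller, lower-numbered registers keep
; the shorter VEX encoding.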
; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
%if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
%macro vpbroadcastq 2