;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2012 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%ifndef private_prefix
%define private_prefix x264
%endif

%ifndef public_prefix
%define public_prefix private_prefix
%endif
%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) _ %+ x

; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align attribute,
; so use a different read-only section.
%macro SECTION_RODATA 0-1 16
%ifidn __OUTPUT_FORMAT__,macho64
SECTION .text align=%1
%elifidn __OUTPUT_FORMAT__,macho
SECTION .text align=%1
%elifidn __OUTPUT_FORMAT__,aout
SECTION .text
%else
SECTION .rodata align=%1
%endif
%endmacro

; aout does not support align=
%macro SECTION_TEXT 0-1 16
%ifidn __OUTPUT_FORMAT__,aout
SECTION .text
%else
SECTION .text align=%1
%endif
%endmacro
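; For example (a sketch, not part of the original file), constants go in
; SECTION_RODATA, optionally with a wider alignment for ymm loads:
;     SECTION_RODATA 32
;     pw_1: times 16 dw 1 ; hypothetical constant table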
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
;      MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
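; A further sketch showing the optional stack size parameter:
; cglobal foo, 2,3,0, 32, dst, src, tmp
; same as above, but also allocates 32 bytes of (aligned) local stack space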
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
; which are slow when a normal ret follows a branch.
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
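; As a sketch (hypothetical function, not part of this file), a leaf
; function adding two 32-bit args could be written portably as:
;     cglobal add_u32, 2,2,0, a, b
;         mov  eax, ad ; ad = dword view of arg 0
;         add  eax, bd ; bd = dword view of arg 1
;         RET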
%macro DECLARE_REG 2-3
%elif ARCH_X86_64 ; memory
%define r%1m [rstk + stack_offset + %3]
%define r%1mp qword r %+ %1 %+ m
%else
%define r%1m [rstk + stack_offset + %3]
%define r%1mp dword r %+ %1 %+ m
%macro DECLARE_REG_SIZE 3

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments
%macro DECLARE_REG_TMP 1-*
CAT_XDEFINE t, %%i, r%1

%macro DECLARE_REG_TMP_SIZE 0-*
%define t%1q t%1 %+ q
%define t%1d t%1 %+ d
%define t%1w t%1 %+ w
%define t%1h t%1 %+ h
%define t%1b t%1 %+ b

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
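; e.g. (sketch) "DECLARE_REG_TMP 2,0" aliases t0 to r2 and t1 to r0, so
; code whose register allocation differs per arch can be written once in
; terms of t0/t1 (with t0q/t0d/etc. available via DECLARE_REG_TMP_SIZE).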
%assign stack_offset stack_offset+gprsize
%assign stack_offset stack_offset-gprsize

%macro PUSH_IF_USED 1-*
%macro POP_IF_USED 1-*
%macro LOAD_IF_USED 1-*
mov r%1, r %+ %1 %+ mp
%assign stack_offset stack_offset+(%2)
%assign stack_offset stack_offset-(%2)

%macro movsxdifnidn 2

%macro DEFINE_ARGS 0-*
CAT_UNDEF arg_name %+ %%i, q
CAT_UNDEF arg_name %+ %%i, d
CAT_UNDEF arg_name %+ %%i, w
CAT_UNDEF arg_name %+ %%i, h
CAT_UNDEF arg_name %+ %%i, b
CAT_UNDEF arg_name %+ %%i, m
CAT_UNDEF arg_name %+ %%i, mp
CAT_UNDEF arg_name, %%i
%xdefine %%stack_offset stack_offset
%undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
%xdefine %1q r %+ %%i %+ q
%xdefine %1d r %+ %%i %+ d
%xdefine %1w r %+ %%i %+ w
%xdefine %1h r %+ %%i %+ h
%xdefine %1b r %+ %%i %+ b
%xdefine %1m r %+ %%i %+ m
%xdefine %1mp r %+ %%i %+ mp
CAT_XDEFINE arg_name, %%i, %1
%xdefine stack_offset %%stack_offset
%assign n_arg_names %0
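; e.g. (sketch) "DEFINE_ARGS dst, src, len" renames r0/r1/r2 to
; dst/src/len (plus dstq, dstd, dstm, ...), which is useful when the
; roles of the registers change partway through a function.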
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
%assign %%stack_alignment ((mmsize + 15) & ~15)
%assign stack_size %1
%assign stack_size -stack_size
%assign xmm_regs_used %2
%if mmsize <= 16 && HAVE_ALIGNED_STACK
%assign stack_size_padded stack_size + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
%if xmm_regs_used > 6
%assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
SUB rsp, stack_size_padded
%assign %%reg_num (regs_used - 1)
%xdefine rstk r %+ %%reg_num
; align stack, and save original stack location directly above
; it, i.e. in [rsp+stack_size_padded], so we can restore the
; stack in a single instruction (i.e. mov rsp, rstk or mov
; rsp, [rsp+stack_size_padded])
%assign stack_size_padded stack_size
%if xmm_regs_used > 6
%assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
%if mmsize == 32 && xmm_regs_used & 1
; re-align to 32 bytes
%assign stack_size_padded (stack_size_padded + 16)
%if %1 < 0 ; need to store rsp on stack
sub rsp, gprsize+stack_size_padded
and rsp, ~(%%stack_alignment-1)
%xdefine rstkm [rsp+stack_size_padded]
%else ; can keep rsp in rstk during whole function
sub rsp, stack_size_padded
and rsp, ~(%%stack_alignment-1)
%if xmm_regs_used > 6

%macro SETUP_STACK_POINTER 1
%if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
%assign regs_used (regs_used + 1)
%elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
%warning "Stack pointer will overwrite register argument"

%macro DEFINE_ARGS_INTERNAL 3+

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 4, R10, 40
DECLARE_REG 5, R11, 48
DECLARE_REG 6, rax, 56
DECLARE_REG 7, rdi, 64
DECLARE_REG 8, rsi, 72
DECLARE_REG 9, rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
%if mmsize != 8 && stack_size == 0
LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5

%macro WIN64_PUSH_XMM 0
%assign %%i xmm_regs_used
%rep (xmm_regs_used-6)
movdqa [rsp + (%%i-6)*16 + stack_size + (~stack_offset&8)], xmm %+ %%i

%macro WIN64_SPILL_XMM 1
%assign xmm_regs_used %1
ASSERT xmm_regs_used <= 16
%if xmm_regs_used > 6
SUB rsp, (xmm_regs_used-6)*16+16

%macro WIN64_RESTORE_XMM_INTERNAL 1
%if xmm_regs_used > 6
%assign %%i xmm_regs_used
%rep (xmm_regs_used-6)
movdqa xmm %+ %%i, [%1 + (%%i-6)*16+stack_size+(~stack_offset&8)]
%if stack_size_padded == 0
add %1, (xmm_regs_used-6)*16+16
%if stack_size_padded > 0
%if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
add %1, stack_size_padded

%macro WIN64_RESTORE_XMM 1
WIN64_RESTORE_XMM_INTERNAL %1
%assign stack_offset (stack_offset-stack_size_padded)
%assign xmm_regs_used 0

%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
WIN64_RESTORE_XMM_INTERNAL rsp
POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 6, rax, 8
DECLARE_REG 7, R10, 16
DECLARE_REG 8, R11, 24
DECLARE_REG 9, rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 15
PUSH_IF_USED 9, 10, 11, 12, 13, 14
LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
add rsp, stack_size_padded
POP_IF_USED 14, 13, 12, 11, 10, 9

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28

%macro DECLARE_ARG 1-*
%define r%1m [rstk + stack_offset + 4*%1 + 4]
%define r%1mp dword r%1m

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
ASSERT regs_used >= num_args
SETUP_STACK_POINTER %4
ASSERT regs_used <= 7
PUSH_IF_USED 3, 4, 5, 6
LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
%if stack_size_padded > 0
%if mmsize == 32 || HAVE_ALIGNED_STACK == 0
add rsp, stack_size_padded
POP_IF_USED 6, 5, 4, 3

%endif ;======================================================================

%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1
%macro WIN64_PUSH_XMM 0
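; TAIL_CALL ends the current function by transferring control to another
; one. A sketch of its use (hypothetical callee name): write
; "TAIL_CALL other_func, 1" when the callee is not the next function in
; the file; with 0, an adjacent callee can simply be fallen into when no
; epilogue is needed.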
%macro TAIL_CALL 2 ; callee, is_nonadjacent

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 1, %1 %+ SUFFIX, %2
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
cglobal_internal 0, %1 %+ SUFFIX, %2
%macro cglobal_internal 2-3+
%xdefine %%FUNCTION_PREFIX private_prefix
%xdefine %%VISIBILITY hidden
%xdefine %%FUNCTION_PREFIX public_prefix
%xdefine %%VISIBILITY
%xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
%xdefine %2.skip_prologue %2 %+ .skip_prologue
CAT_XDEFINE cglobaled_, %2, 1
%xdefine current_function %2
%ifidn __OUTPUT_FORMAT__,elf
global %2:function %%VISIBILITY
RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
%assign stack_offset 0
%assign stack_size_padded 0
%assign xmm_regs_used 0
%xdefine %1 mangle(private_prefix %+ _ %+ %1)
CAT_XDEFINE cglobaled_, %1, 1

; like cextern, but without the prefix
%macro cextern_naked 1
%xdefine %1 mangle(%1)
CAT_XDEFINE cglobaled_, %1, 1
%xdefine %1 mangle(private_prefix %+ _ %+ %1)

; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits

%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx
%assign cpuflags_avx2     (1<<14)| cpuflags_avx
%assign cpuflags_fma3     (1<<15)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)
%assign cpuflags_bmi1     (1<<23)
%assign cpuflags_bmi2     (1<<24)|cpuflags_bmi1
%assign cpuflags_tbm      (1<<25)|cpuflags_bmi1

%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
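; e.g. (sketch) a function body can branch at assembly time on these
; flags to share code between instruction-set variants:
;     %if cpuflag(ssse3)
;         pshufb m0, m2
;     %else
;         psrlw m0, 8 ; hypothetical sse2 fallback
;     %endif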
; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX & co.
%macro INIT_CPUFLAGS 0-2
%assign cpuflags cpuflags_%1
%xdefine cpuname %1_%2
%assign cpuflags cpuflags | cpuflags_%2
%xdefine SUFFIX _ %+ cpuname
%assign avx_enabled 1
%if mmsize == 16 && notcpuflag(sse2)
%define movnta movntps

%macro INIT_MMX 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX %1
%define movnta movntq
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nmm, %%i, %%i

%macro INIT_XMM 0-1+
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM %1
%define num_mmregs 16
%define movnta movntdq
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nxmm, %%i, %%i

%macro INIT_YMM 0-1+
%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM %1
%define num_mmregs 16
%define movnta vmovntps
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nymm, %%i, %%i
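; e.g. (sketch) "INIT_XMM sse2" makes m0, m1, ... refer to xmm registers
; and suffixes subsequent cglobal names with _sse2, while "INIT_YMM avx2"
; switches them to ymm registers with an _avx2 suffix.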
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; registers in SSE versions.
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.

; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
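; e.g. (a sketch) after
;     mulps m2, m0
;     SWAP 0, 2
; the register holding the product is referred to as m0 from here on;
; the SWAP itself emits no instructions.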
%macro PERMUTE 2-* ; takes a list of pairs to swap

%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
CAT_XDEFINE n, m%1, %1
CAT_XDEFINE n, m%2, %2
; If we were called as "SWAP m0,m1" rather than "SWAP 0,1" infer the original numbers here.
; Be careful using this mode in nested macros though, as in some cases there may be
; other copies of m# that have already been dereferenced and don't get updated correctly.
%xdefine %%n1 n %+ %1
%xdefine %%n2 n %+ %2
%xdefine tmp m %+ %%n1
CAT_XDEFINE m, %%n1, m %+ %%n2
CAT_XDEFINE m, %%n2, tmp
CAT_XDEFINE n, m %+ %%n1, %%n1
CAT_XDEFINE n, m %+ %%n2, %%n2

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
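; e.g. (sketch) a helper that ends in "SWAP 0, 1" followed by
; SAVE_MM_PERMUTATION can return its result in m0; callers using the
; call macro below pick up that permutation automatically.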
%macro SAVE_MM_PERMUTATION 0-1
%xdefine %%f current_function %+ _m
CAT_XDEFINE %%f, %%i, m %+ %%i

%macro LOAD_MM_PERMUTATION 1 ; name to load from
CAT_XDEFINE m, %%i, %1_m %+ %%i
CAT_XDEFINE n, m %+ %%i, %%i
; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
call_internal %1 %+ SUFFIX, %1
%macro call_internal 2
LOAD_MM_PERMUTATION %%i

; Substitutions that reduce instruction size but are functionally equivalent

;=============================================================================
; AVX abstraction layer
;=============================================================================

CAT_XDEFINE sizeofmm, i, 8
CAT_XDEFINE sizeofxmm, i, 16
CAT_XDEFINE sizeofymm, i, 32

%macro CHECK_AVX_INSTR_EMU 3-*
%error non-avx emulation of ``%%opcode'' is not supported
;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == number of operands given
%macro RUN_AVX_INSTR 6-7+
%define %%sizeofreg sizeof%6
%define %%sizeofreg sizeof%5
%define %%sizeofreg mmsize
%define %%regmov movq
%define %%regmov movaps
%define %%regmov movdqa
%if avx_enabled && %%sizeofreg==16
CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7

; 3arg AVX ops with a memory arg can only have it in src2,
; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
; So, if the op is symmetric and the wrong one is memory, swap them.
%macro RUN_AVX_INSTR1 8
%if %%swap && %3 == 0 && %8 == 1
RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
;%1 == instruction
;%2 == 1 if float, 0 if int
;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 4
%macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
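; e.g. (sketch) with these wrappers, "addps m0, m1, m2" assembles to
; "vaddps xmm0, xmm1, xmm2" under INIT_XMM avx, and is emulated as
; "movaps xmm0, xmm1" + "addps xmm0, xmm2" under plain INIT_XMM.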
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 1, 0
AVX_INSTR cmpps, 1, 1, 0
AVX_INSTR cmpsd, 1, 1, 0
AVX_INSTR cmpss, 1, 1, 0
AVX_INSTR cvtdq2ps, 1, 0, 0
AVX_INSTR cvtpd2dq, 1, 0, 0
AVX_INSTR cvtps2dq, 1, 0, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR pabsb, 0, 0, 0
AVX_INSTR pabsw, 0, 0, 0
AVX_INSTR pabsd, 0, 0, 0
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pcmpestri, 0, 0, 0
AVX_INSTR pcmpestrm, 0, 0, 0
AVX_INSTR pcmpistri, 0, 0, 0
AVX_INSTR pcmpistrm, 0, 0, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmovmskb, 0, 0, 0
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR pshufd, 0, 1, 0
AVX_INSTR pshufhw, 0, 1, 0
AVX_INSTR pshuflw, 0, 1, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR ptest, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1

; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1

; base-4 constants for shuffles
%assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
CAT_XDEFINE q000, j, i
CAT_XDEFINE q00, j, i
CAT_XDEFINE q0, j, i
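; e.g. (sketch) q3120 expands to the shuffle immediate selecting
; elements 3,1,2,0 (listed from most to least significant), so a
; shuffle can be written as "pshufd m0, m1, q3120".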
%macro FMA_INSTR 3
%macro %1 5-8 %1, %2, %3
%if cpuflag(xop) || cpuflag(fma4)
FMA_INSTR fmaddps, mulps, addps
FMA_INSTR pmacsdd, pmulld, paddd
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
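; e.g. (sketch) "fmaddps m0, m1, m2, m3" becomes a single fused
; instruction on xop/fma4-capable cpus and a mulps + addps pair
; elsewhere.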
; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf
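; e.g. "tzcnt eax, ecx" assembles as "rep bsf eax, ecx": cpus with BMI1
; execute it as a true tzcnt, while older cpus ignore the rep prefix and
; execute plain bsf.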