;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2013 x264 project
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Fiona Glaser <fiona@x264.com>
;*          Henrik Gramner <hengar-6@student.ltu.se>
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org.
%ifndef private_prefix
    %define private_prefix x264
%define public_prefix private_prefix

%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) _ %+ x
; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align attribute,
; so use a different read-only section.
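; Illustrative usage sketch (not part of the original docs; the constant name
; is hypothetical):
;     SECTION_RODATA 32
;     pw_1: times 16 dw 1 ; 32-byte aligned read-only constant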
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,macho64
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,macho
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,aout
        SECTION .rodata align=%1
; aout does not support align=
%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text align=%1

%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.

; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x,
;      MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes),
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.
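
; For illustration, a complete leaf function built with these macros might look
; as follows (hypothetical sketch; assumes size is a positive multiple of mmsize):
;     INIT_XMM sse2
;     cglobal add_u8, 3,3,2, dst, src, size
;     .loop:
;         movu  m0, [srcq+sizeq-mmsize]
;         movu  m1, [dstq+sizeq-mmsize]
;         paddb m0, m1
;         movu  [dstq+sizeq-mmsize], m0
;         sub   sizeq, mmsize
;         jg .loop
;         RET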
; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Use this instead of RET if it's a branch target.
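
; e.g. (illustrative):
;     jz .ret      ; the return is a branch target, so a plain ret would be
;     ...          ; slow on the AMD cpus described further below
; .ret:
;     REP_RET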
; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
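
; e.g. (illustrative) in a function declared with "cglobal foo, 2,3":
;     mov r2d, r0d  ; dword copy of arg 0 into the scratch register
;     mov r1, r1mp  ; reload arg 1 from its original location, native size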
%macro DECLARE_REG 2-3
    %elif ARCH_X86_64 ; memory
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp qword r %+ %1 %+ m
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp dword r %+ %1 %+ m

%macro DECLARE_REG_SIZE 3

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null
; t# defines for when per-arch register allocation is more complex than just function arguments
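; e.g. (illustrative): "DECLARE_REG_TMP 2,1,0" defines t0=r2, t1=r1, t2=r0,
; so shared code can be written against t# while each code path picks its own
; mapping.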
%macro DECLARE_REG_TMP 1-*
        CAT_XDEFINE t, %%i, r%1

%macro DECLARE_REG_TMP_SIZE 0-*
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1h t%1 %+ h
        %define t%1b t%1 %+ b

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14

    %assign stack_offset stack_offset+gprsize
    %assign stack_offset stack_offset-gprsize

%macro PUSH_IF_USED 1-*
%macro POP_IF_USED 1-*
%macro LOAD_IF_USED 1-*
            mov r%1, r %+ %1 %+ mp

    %assign stack_offset stack_offset+(%2)
    %assign stack_offset stack_offset-(%2)

%macro movsxdifnidn 2

%macro DEFINE_ARGS 0-*
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, h
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name %+ %%i, m
            CAT_UNDEF arg_name %+ %%i, mp
            CAT_UNDEF arg_name, %%i
    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1h r %+ %%i %+ h
        %xdefine %1b r %+ %%i %+ b
        %xdefine %1m r %+ %%i %+ m
        %xdefine %1mp r %+ %%i %+ mp
        CAT_XDEFINE arg_name, %%i, %1
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
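
; Illustrative use of DEFINE_ARGS (hypothetical names): once the original
; argument names are no longer needed, the same registers can be renamed
; mid-function:
;     cglobal foo, 3,3, dst, src, len
;         ...
;         DEFINE_ARGS tmp, ptr, cnt ; r0..r2 are now tmpq, ptrq, cntq, etc.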
%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
    %assign %%stack_alignment ((mmsize + 15) & ~15)
    %assign stack_size %1
        %assign stack_size -stack_size
    %assign xmm_regs_used %2
    %if mmsize <= 16 && HAVE_ALIGNED_STACK
        %assign stack_size_padded stack_size + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1))
        %if xmm_regs_used > 6
            %assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
        SUB rsp, stack_size_padded
        %assign %%reg_num (regs_used - 1)
        %xdefine rstk r %+ %%reg_num
        ; align stack, and save original stack location directly above
        ; it, i.e. in [rsp+stack_size_padded], so we can restore the
        ; stack in a single instruction (i.e. mov rsp, rstk or mov
        ; rsp, [rsp+stack_size_padded])
        %assign stack_size_padded stack_size
        %if xmm_regs_used > 6
            %assign stack_size_padded stack_size_padded + (xmm_regs_used - 6) * 16
            %if mmsize == 32 && xmm_regs_used & 1
                ; re-align to 32 bytes
                %assign stack_size_padded (stack_size_padded + 16)
        %if %1 < 0 ; need to store rsp on stack
            sub rsp, gprsize+stack_size_padded
            and rsp, ~(%%stack_alignment-1)
            %xdefine rstkm [rsp+stack_size_padded]
        %else ; can keep rsp in rstk during the whole function
            sub rsp, stack_size_padded
            and rsp, ~(%%stack_alignment-1)
    %if xmm_regs_used > 6

%macro SETUP_STACK_POINTER 1
    %if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32)
        %assign regs_used (regs_used + 1)
    %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2
        %warning "Stack pointer will overwrite register argument"

%macro DEFINE_ARGS_INTERNAL 3+

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R12, 96
DECLARE_REG 12, R13, 104
DECLARE_REG 13, R14, 112
DECLARE_REG 14, R15, 120

%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    %if mmsize != 8 && stack_size == 0
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5

%macro WIN64_PUSH_XMM 0
    %assign %%i xmm_regs_used
    %rep (xmm_regs_used-6)
        movaps [rsp + (%%i-6)*16 + stack_size + (~stack_offset&8)], xmm %+ %%i

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 6
        SUB rsp, (xmm_regs_used-6)*16+16

%macro WIN64_RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            movaps xmm %+ %%i, [%1 + (%%i-6)*16+stack_size+(~stack_offset&8)]
        %if stack_size_padded == 0
            add %1, (xmm_regs_used-6)*16+16
    %if stack_size_padded > 0
        %if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0)
            add %1, stack_size_padded

%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %assign stack_offset (stack_offset-stack_size_padded)
    %assign xmm_regs_used 0

%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0

    WIN64_RESTORE_XMM_INTERNAL rsp
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R12, 48
DECLARE_REG 12, R13, 56
DECLARE_REG 13, R14, 64
DECLARE_REG 14, R15, 72

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0

    %if stack_size_padded > 0
        %if mmsize == 32 || HAVE_ALIGNED_STACK == 0
            add rsp, stack_size_padded
    POP_IF_USED 14, 13, 12, 11, 10, 9

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28

%macro DECLARE_ARG 1-*
        %define r%1m [rstk + stack_offset + 4*%1 + 4]
        %define r%1mp dword r%1m

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 7
    PUSH_IF_USED 3, 4, 5, 6
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    DEFINE_ARGS_INTERNAL %0, %4, %5

%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0

    %if stack_size_padded > 0
        %if mmsize == 32 || HAVE_ALIGNED_STACK == 0
            add rsp, stack_size_padded
    POP_IF_USED 6, 5, 4, 3

%endif ;======================================================================

%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1
%macro WIN64_PUSH_XMM 0

; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)

%define last_branch_adr $$
%macro AUTO_REP_RET 0
        times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr
    %elif notcpuflag(ssse3)
        times ((last_branch_adr-$)>>31)+1 rep

%macro BRANCH_INSTR 0-*
            %xdefine last_branch_adr %%branch_instr

BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp

%macro TAIL_CALL 2 ; callee, is_nonadjacent

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 1, %1 %+ SUFFIX, %2
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 0, %1 %+ SUFFIX, %2
%macro cglobal_internal 2-3+
        %xdefine %%FUNCTION_PREFIX private_prefix
        %xdefine %%VISIBILITY hidden
        %xdefine %%FUNCTION_PREFIX public_prefix
        %xdefine %%VISIBILITY
    %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
    %xdefine %2.skip_prologue %2 %+ .skip_prologue
    CAT_XDEFINE cglobaled_, %2, 1
    %xdefine current_function %2
    %ifidn __OUTPUT_FORMAT__,elf
        global %2:function %%VISIBILITY
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %assign stack_offset 0
    %assign stack_size_padded 0
    %assign xmm_regs_used 0

    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1

; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    CAT_XDEFINE cglobaled_, %1, 1

    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    %ifidn __OUTPUT_FORMAT__,elf
        global %1:data hidden

; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits

%assign cpuflags_mmx      (1<<0)
%assign cpuflags_mmx2     (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow    (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
%assign cpuflags_sse      (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2     (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
%assign cpuflags_sse3     (1<<7) | cpuflags_sse2
%assign cpuflags_ssse3    (1<<8) | cpuflags_sse3
%assign cpuflags_sse4     (1<<9) | cpuflags_ssse3
%assign cpuflags_sse42    (1<<10)| cpuflags_sse4
%assign cpuflags_avx      (1<<11)| cpuflags_sse42
%assign cpuflags_xop      (1<<12)| cpuflags_avx
%assign cpuflags_fma4     (1<<13)| cpuflags_avx
%assign cpuflags_avx2     (1<<14)| cpuflags_avx
%assign cpuflags_fma3     (1<<15)| cpuflags_avx

%assign cpuflags_cache32  (1<<16)
%assign cpuflags_cache64  (1<<17)
%assign cpuflags_slowctz  (1<<18)
%assign cpuflags_lzcnt    (1<<19)
%assign cpuflags_misalign (1<<20)
%assign cpuflags_aligned  (1<<21) ; not a cpu feature, but a function variant
%assign cpuflags_atom     (1<<22)
%assign cpuflags_bmi1     (1<<23)
%assign cpuflags_bmi2     (1<<24)|cpuflags_bmi1
%assign cpuflags_tbm      (1<<25)|cpuflags_bmi1

%define    cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))

; Takes up to 2 cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
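; e.g. (illustrative): building two variants of the same function:
;     INIT_XMM sse2
;     cglobal foo, ... ; assembles foo_sse2; cpuflag(ssse3) is false here
;     INIT_XMM ssse3
;     cglobal foo, ... ; assembles foo_ssse3; cpuflag(ssse3) is true here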
%macro INIT_CPUFLAGS 0-2
        %assign cpuflags cpuflags_%1
            %xdefine cpuname %1_%2
            %assign cpuflags cpuflags | cpuflags_%2
    %xdefine SUFFIX _ %+ cpuname
        %assign avx_enabled 1
    %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
        %define movnta movntps
    %if ARCH_X86_64 == 0 && notcpuflag(sse2)
; m# is a simd register of the currently selected size
; xm# is the corresponding xmmreg (if the selected size is xmm or ymm), or mmreg (if mmx is selected)
; ym# is the corresponding ymmreg (if the selected size is xmm or ymm), or mmreg (if mmx is selected)
; (All 3 remain in sync through SWAP.)
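; e.g. (illustrative): after INIT_YMM avx2, m0 is ymm0 and xm0 is xmm0;
; after INIT_XMM sse2, m0 and xm0 are both xmm0 and mmsize is 16.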
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define movnta movntq
        CAT_XDEFINE m, %%i, mm %+ %%i
        CAT_XDEFINE nmm, %%i, %%i

    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define num_mmregs 16
    %define movnta movntdq
        CAT_XDEFINE m, %%i, xmm %+ %%i
        CAT_XDEFINE nxmm, %%i, %%i

    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define num_mmregs 16
    %define movnta movntdq
        CAT_XDEFINE m, %%i, ymm %+ %%i
        CAT_XDEFINE nymm, %%i, %%i

%macro DECLARE_MMCAST 1
    %define xmmxmm%1 xmm%1
    %define xmmymm%1 xmm%1
    %define ymmxmm%1 ymm%1
    %define ymmymm%1 ymm%1
    %define xm%1 xmm %+ m%1
    %define ym%1 ymm %+ m%1
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; registers.
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.

; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
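
; e.g. (illustrative): no instructions are emitted, only names change:
;     mova m0, [r0]
;     mova m1, [r1]
;     SWAP 0, 1     ; m0 now names the register that was loaded from [r1]
;     mova [r2], m0 ; stores the data loaded from [r1]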
%macro PERMUTE 2-* ; takes a list of pairs to swap
        CAT_XDEFINE n, m%1, %1

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
    %ifnum %1 ; SWAP 0, 1, ...
        SWAP_INTERNAL_NUM %1, %2
    %else ; SWAP m0, m1, ...
        SWAP_INTERNAL_NAME %1, %2

%macro SWAP_INTERNAL_NUM 2-*
        CAT_XDEFINE n, m%1, %1
        CAT_XDEFINE n, m%2, %2

%macro SWAP_INTERNAL_NAME 2-*
    %xdefine %%args n %+ %1
        %xdefine %%args %%args, n %+ %2
    SWAP_INTERNAL_NUM %%args

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
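
; e.g. (illustrative):
;     cglobal helper
;         ...
;         SWAP 0, 2 ; the result ends up under the name m0
;         SAVE_MM_PERMUTATION
;         ret
; A later "call helper" (via the call macro below) reloads this permutation,
; so the caller's m0 names the register that actually holds the result.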
%macro SAVE_MM_PERMUTATION 0-1
        %xdefine %%f current_function %+ _m
        CAT_XDEFINE %%f, %%i, m %+ %%i

%macro LOAD_MM_PERMUTATION 1 ; name to load from
        CAT_XDEFINE m, %%i, %1_m %+ %%i
        CAT_XDEFINE n, m %+ %%i, %%i

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
    call_internal %1, %1 %+ SUFFIX
%macro call_internal 2
        LOAD_MM_PERMUTATION %%i

; Substitutions that reduce instruction size but are functionally equivalent

;=============================================================================
; AVX abstraction layer
;=============================================================================

    CAT_XDEFINE sizeofmm, i, 8
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32

%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
            %error non-avx emulation of ``%%opcode'' is not supported

;%2 == 1 if float, 0 if int
;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro RUN_AVX_INSTR 5-8+
        %assign %%sizeofreg sizeof%6
        %assign %%sizeofreg sizeof%5
        %assign %%sizeofreg mmsize
    %assign %%emulate_avx 0
    %if avx_enabled && %%sizeofreg >= 16
        %xdefine %%instr v%1
        %assign %%emulate_avx 1
            CHECK_AVX_INSTR_EMU {%1 %5, %6, %7, %8}, %5, %7, %8
            CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
        ; 3-operand AVX instructions with a memory arg can only have it in src2,
        ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
        ; So, if the instruction is commutative with a memory arg, swap them.
        %if %%sizeofreg == 8
        %%instr %5, %6, %7, %8

;%2 == 1 if float, 0 if int
;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-4 0, 1, 0
    %macro %1 1-9 fnord, fnord, fnord, fnord, %1, %2, %3, %4
            RUN_AVX_INSTR %6, %7, %8, %9, %1
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4
            RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4, %5

; Instructions with both VEX and non-VEX encodings
; Non-destructive instructions are written without parameters
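; e.g. (illustrative expansions of the same source line):
;     INIT_XMM avx:  paddw m0, m1, m2 becomes vpaddw xmm0, xmm1, xmm2
;     INIT_XMM sse2: paddw m0, m1, m2 becomes movdqa xmm0, xmm1
;                                             paddw  xmm0, xmm2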
AVX_INSTR addpd, 1, 0, 1
AVX_INSTR addps, 1, 0, 1
AVX_INSTR addsd, 1, 0, 1
AVX_INSTR addss, 1, 0, 1
AVX_INSTR addsubpd, 1, 0, 0
AVX_INSTR addsubps, 1, 0, 0
AVX_INSTR aesdec, 0, 0, 0
AVX_INSTR aesdeclast, 0, 0, 0
AVX_INSTR aesenc, 0, 0, 0
AVX_INSTR aesenclast, 0, 0, 0
AVX_INSTR aeskeygenassist
AVX_INSTR andnpd, 1, 0, 0
AVX_INSTR andnps, 1, 0, 0
AVX_INSTR andpd, 1, 0, 1
AVX_INSTR andps, 1, 0, 1
AVX_INSTR blendpd, 1, 0, 0
AVX_INSTR blendps, 1, 0, 0
AVX_INSTR blendvpd, 1, 0, 0
AVX_INSTR blendvps, 1, 0, 0
AVX_INSTR cmppd, 1, 1, 0
AVX_INSTR cmpps, 1, 1, 0
AVX_INSTR cmpsd, 1, 1, 0
AVX_INSTR cmpss, 1, 1, 0
AVX_INSTR divpd, 1, 0, 0
AVX_INSTR divps, 1, 0, 0
AVX_INSTR divsd, 1, 0, 0
AVX_INSTR divss, 1, 0, 0
AVX_INSTR dppd, 1, 1, 0
AVX_INSTR dpps, 1, 1, 0
AVX_INSTR haddpd, 1, 0, 0
AVX_INSTR haddps, 1, 0, 0
AVX_INSTR hsubpd, 1, 0, 0
AVX_INSTR hsubps, 1, 0, 0
AVX_INSTR insertps, 1, 1, 0
AVX_INSTR maskmovdqu
AVX_INSTR maxpd, 1, 0, 1
AVX_INSTR maxps, 1, 0, 1
AVX_INSTR maxsd, 1, 0, 1
AVX_INSTR maxss, 1, 0, 1
AVX_INSTR minpd, 1, 0, 1
AVX_INSTR minps, 1, 0, 1
AVX_INSTR minsd, 1, 0, 1
AVX_INSTR minss, 1, 0, 1
AVX_INSTR movhlps, 1, 0, 0
AVX_INSTR movhpd, 1, 0, 0
AVX_INSTR movhps, 1, 0, 0
AVX_INSTR movlhps, 1, 0, 0
AVX_INSTR movlpd, 1, 0, 0
AVX_INSTR movlps, 1, 0, 0
AVX_INSTR movsd, 1, 0, 0
AVX_INSTR movss, 1, 0, 0
AVX_INSTR mpsadbw, 0, 1, 0
AVX_INSTR mulpd, 1, 0, 1
AVX_INSTR mulps, 1, 0, 1
AVX_INSTR mulsd, 1, 0, 1
AVX_INSTR mulss, 1, 0, 1
AVX_INSTR orpd, 1, 0, 1
AVX_INSTR orps, 1, 0, 1
AVX_INSTR packsswb, 0, 0, 0
AVX_INSTR packssdw, 0, 0, 0
AVX_INSTR packuswb, 0, 0, 0
AVX_INSTR packusdw, 0, 0, 0
AVX_INSTR paddb, 0, 0, 1
AVX_INSTR paddw, 0, 0, 1
AVX_INSTR paddd, 0, 0, 1
AVX_INSTR paddq, 0, 0, 1
AVX_INSTR paddsb, 0, 0, 1
AVX_INSTR paddsw, 0, 0, 1
AVX_INSTR paddusb, 0, 0, 1
AVX_INSTR paddusw, 0, 0, 1
AVX_INSTR palignr, 0, 1, 0
AVX_INSTR pand, 0, 0, 1
AVX_INSTR pandn, 0, 0, 0
AVX_INSTR pavgb, 0, 0, 1
AVX_INSTR pavgw, 0, 0, 1
AVX_INSTR pblendvb, 0, 0, 0
AVX_INSTR pblendw, 0, 1, 0
AVX_INSTR pclmulqdq, 0, 1, 0
AVX_INSTR pcmpeqb, 0, 0, 1
AVX_INSTR pcmpeqw, 0, 0, 1
AVX_INSTR pcmpeqd, 0, 0, 1
AVX_INSTR pcmpeqq, 0, 0, 1
AVX_INSTR pcmpgtb, 0, 0, 0
AVX_INSTR pcmpgtw, 0, 0, 0
AVX_INSTR pcmpgtd, 0, 0, 0
AVX_INSTR pcmpgtq, 0, 0, 0
AVX_INSTR phaddw, 0, 0, 0
AVX_INSTR phaddd, 0, 0, 0
AVX_INSTR phaddsw, 0, 0, 0
AVX_INSTR phminposuw
AVX_INSTR phsubw, 0, 0, 0
AVX_INSTR phsubd, 0, 0, 0
AVX_INSTR phsubsw, 0, 0, 0
AVX_INSTR pinsrb, 0, 1, 0
AVX_INSTR pinsrd, 0, 1, 0
AVX_INSTR pinsrq, 0, 1, 0
AVX_INSTR pinsrw, 0, 1, 0
AVX_INSTR pmaddwd, 0, 0, 1
AVX_INSTR pmaddubsw, 0, 0, 0
AVX_INSTR pmaxsb, 0, 0, 1
AVX_INSTR pmaxsw, 0, 0, 1
AVX_INSTR pmaxsd, 0, 0, 1
AVX_INSTR pmaxub, 0, 0, 1
AVX_INSTR pmaxuw, 0, 0, 1
AVX_INSTR pmaxud, 0, 0, 1
AVX_INSTR pminsb, 0, 0, 1
AVX_INSTR pminsw, 0, 0, 1
AVX_INSTR pminsd, 0, 0, 1
AVX_INSTR pminub, 0, 0, 1
AVX_INSTR pminuw, 0, 0, 1
AVX_INSTR pminud, 0, 0, 1
AVX_INSTR pmuldq, 0, 0, 1
AVX_INSTR pmulhrsw, 0, 0, 1
AVX_INSTR pmulhuw, 0, 0, 1
AVX_INSTR pmulhw, 0, 0, 1
AVX_INSTR pmullw, 0, 0, 1
AVX_INSTR pmulld, 0, 0, 1
AVX_INSTR pmuludq, 0, 0, 1
AVX_INSTR por, 0, 0, 1
AVX_INSTR psadbw, 0, 0, 1
AVX_INSTR pshufb, 0, 0, 0
AVX_INSTR psignb, 0, 0, 0
AVX_INSTR psignw, 0, 0, 0
AVX_INSTR psignd, 0, 0, 0
AVX_INSTR psllw, 0, 0, 0
AVX_INSTR pslld, 0, 0, 0
AVX_INSTR psllq, 0, 0, 0
AVX_INSTR pslldq, 0, 0, 0
AVX_INSTR psraw, 0, 0, 0
AVX_INSTR psrad, 0, 0, 0
AVX_INSTR psrlw, 0, 0, 0
AVX_INSTR psrld, 0, 0, 0
AVX_INSTR psrlq, 0, 0, 0
AVX_INSTR psrldq, 0, 0, 0
AVX_INSTR psubb, 0, 0, 0
AVX_INSTR psubw, 0, 0, 0
AVX_INSTR psubd, 0, 0, 0
AVX_INSTR psubq, 0, 0, 0
AVX_INSTR psubsb, 0, 0, 0
AVX_INSTR psubsw, 0, 0, 0
AVX_INSTR psubusb, 0, 0, 0
AVX_INSTR psubusw, 0, 0, 0
AVX_INSTR punpckhbw, 0, 0, 0
AVX_INSTR punpckhwd, 0, 0, 0
AVX_INSTR punpckhdq, 0, 0, 0
AVX_INSTR punpckhqdq, 0, 0, 0
AVX_INSTR punpcklbw, 0, 0, 0
AVX_INSTR punpcklwd, 0, 0, 0
AVX_INSTR punpckldq, 0, 0, 0
AVX_INSTR punpcklqdq, 0, 0, 0
AVX_INSTR pxor, 0, 0, 1
AVX_INSTR rcpps, 1, 0, 0
AVX_INSTR rcpss, 1, 0, 0
AVX_INSTR rsqrtps, 1, 0, 0
AVX_INSTR rsqrtss, 1, 0, 0
AVX_INSTR shufpd, 1, 1, 0
AVX_INSTR shufps, 1, 1, 0
AVX_INSTR sqrtpd, 1, 0, 0
AVX_INSTR sqrtps, 1, 0, 0
AVX_INSTR sqrtsd, 1, 0, 0
AVX_INSTR sqrtss, 1, 0, 0
AVX_INSTR subpd, 1, 0, 0
AVX_INSTR subps, 1, 0, 0
AVX_INSTR subsd, 1, 0, 0
AVX_INSTR subss, 1, 0, 0
AVX_INSTR unpckhpd, 1, 0, 0
AVX_INSTR unpckhps, 1, 0, 0
AVX_INSTR unpcklpd, 1, 0, 0
AVX_INSTR unpcklps, 1, 0, 0
AVX_INSTR xorpd, 1, 0, 1
AVX_INSTR xorps, 1, 0, 1
; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
AVX_INSTR pfadd, 1, 0, 1
AVX_INSTR pfsub, 1, 0, 0
AVX_INSTR pfmul, 1, 0, 1

; base-4 constants for shuffles
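; e.g. q3120 expands to 0xD8 (binary 11 01 10 00), as in "pshufd m0, m1, q3120"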
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
        CAT_XDEFINE q000, j, i
        CAT_XDEFINE q00, j, i
        CAT_XDEFINE q0, j, i

    %macro %1 4-7 %1, %2, %3

FMA_INSTR pmacsdd, pmulld, paddd
FMA_INSTR pmacsww, pmullw, paddw
FMA_INSTR pmadcswd, pmaddwd, paddd
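; e.g. (illustrative): "pmacsdd m0, m1, m2, m3" computes m0 = m1*m2 + m3,
; using the XOP instruction when available and pmulld+paddd otherwise.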
; convert FMA4 to FMA3 if possible
    %macro %1 4-8 %1, %2, %3, %4
                v%6 %1, %4, %3 ; %1 = %1 * %3 + %4
                v%7 %1, %2, %4 ; %1 = %2 * %1 + %4
                v%8 %1, %2, %3 ; %1 = %2 * %3 + %1
            %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported

FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd
FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps
FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd
FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss

FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd
FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps
FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd
FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps

FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd
FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps
FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd
FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss

FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd
FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps
FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd
FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss

FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd
FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps
FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd
FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss

; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug
%if ARCH_X86_64 == 0
%macro vpbroadcastq 2