INIT_XMM
IDCT_ADD_10 sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT_ADD_10 avx
%endif
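
The change repeated throughout this patch is mechanical. HAVE_AVX used to be defined only when AVX support was compiled in, so it was tested with %ifdef; now that configure emits it as a constant that is always defined to 0 or 1, the test has to become a numeric %if, because %ifdef would also fire on a value of 0. A minimal sketch of the two semantics (the %define stands in for what config.asm provides):

    %define HAVE_AVX 0   ; the flag is always defined; its value is the answer
    %ifdef HAVE_AVX      ; old test: true even though AVX is disabled
    %endif
    %if HAVE_AVX         ; new test: false, as intended
    %endif
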
INIT_XMM
ALIGN 16
ADD4x4IDCT sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
ALIGN 16
ADD4x4IDCT avx
%endif
INIT_XMM
IDCT_ADD16_10 sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT_ADD16_10 avx
%endif
INIT_XMM
IDCT8_DC_ADD sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT8_DC_ADD avx
%endif
INIT_XMM
IDCT_ADD16INTRA_10 sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT_ADD16INTRA_10 avx
%endif
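
Each hunk above touches the same x86inc idiom: the function body is written once as a macro and expanded once per instruction set, with INIT_XMM/INIT_AVX switching the register set and instruction forms between expansions. A sketch of the pattern, using a hypothetical DO_THING macro (not part of this file):

    %macro DO_THING 1                  ; %1 = cpu suffix for the symbol name
    cglobal h264_do_thing_10_%1, 3,3
        RET
    %endmacro

    INIT_XMM
    DO_THING sse2                      ; emits h264_do_thing_10_sse2
    %if HAVE_AVX
    INIT_AVX
    DO_THING avx                       ; emits h264_do_thing_10_avx
    %endif
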
;-----------------------------------------------------------------------------
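; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------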
%macro IDCT_ADD8 1
cglobal h264_idct_add8_10_%1,5,7
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
mov r10, r0
%endif
add r2, 1024
ADD16_OP_INTRA %1, 16, 4+ 6*8
ADD16_OP_INTRA %1, 18, 4+ 7*8
add r2, 1024-128*2
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
mov r0, [r10+gprsize]
%else
mov r0, r0m
%endif
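
The two ARCH_X86_64 blocks in this function pair up. On x86-64, the pixel **dst argument is stashed in the scratch register r10 before r0 is reused as the working destination pointer, so the second chroma plane can later be fetched with a single load from [r10+gprsize], i.e. dst[1]. On x86-32 there is no spare register, and the argument is instead re-read from its stack slot through x86inc's r0m. The add r2, 1024 above skips the 16 luma blocks: the 10-bit code uses 32-bit coefficients, so that is 16 blocks × 16 coefficients × 4 bytes.
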
INIT_XMM
IDCT_ADD8 sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT_ADD8 avx
%endif
; %1=int32_t *block, %2=int32_t *dstblock (dctcoef is 32 bits at this bit depth)
%macro IDCT8_ADD_SSE_START 2
IDCT8_1D_FULL %1
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
TRANSPOSE4x4D 0,1,2,3,8
mova [%2 ], m0
TRANSPOSE4x4D 4,5,6,7,8
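
IDCT8_ADD_SSE_START takes this in-register path only on x86-64, where xmm8-15 exist: TRANSPOSE4x4D transposes the four dword rows held in m0-m3 (then m4-m7), with its last argument, m8, serving as the spare register the transpose needs, and only finished rows are written to the %2 scratch area. On x86-32 there is no free xmm register, so the fallback branch has to spill one register to the scratch area to use as the transpose temporary.
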
%macro IDCT8_ADD 1
cglobal h264_idct8_add_10_%1, 3,4,16
-%ifndef UNIX64
+%if UNIX64 == 0
%assign pad 16-gprsize-(stack_offset&15)
sub rsp, pad
call h264_idct8_add1_10_%1
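
Outside 64-bit Unix, x86inc does not guarantee a 16-byte-aligned stack on entry, so the public symbol is only a thin wrapper: pad is derived from the assembler-tracked stack_offset so that after sub rsp, pad the stack is 16-byte aligned, and the aligned inner routine h264_idct8_add1_10_%1 does the real work. As a worked case, on WIN64 with nothing pushed, pad = 16 - 8 - 0 = 8, and the 8-byte return address plus 8 bytes of padding restore 16-byte alignment. The sub rsp, pad on the next line already belongs to that inner routine, which reserves its own scratch space for the 8x8 transpose and adds 32 to the DC coefficient so the final right shift rounds instead of truncating.
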
sub rsp, pad
add dword [r1], 32
-%ifdef ARCH_X86_64
+%if ARCH_X86_64
IDCT8_ADD_SSE_START r1, rsp
SWAP 1, 9
SWAP 2, 10
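
The SWAPs are free: x86inc's SWAP only permutes register names at assembly time and emits no instructions. Renaming the just-transposed rows into the high registers m9 and m10 lets the first half of the 8x8 block stay resident in xmm0-15 while the second half is transformed, which is why this cglobal declares all 16 xmm registers. The x86-32 version, limited to xmm0-7, instead round-trips intermediate rows through the stack buffer.
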
INIT_XMM
IDCT8_ADD sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT8_ADD avx
%endif
INIT_XMM
IDCT8_ADD4 sse2
-%ifdef HAVE_AVX
+%if HAVE_AVX
INIT_AVX
IDCT8_ADD4 avx
%endif