%define ABS2 ABS2_MMX
AC3_MAX_MSB_ABS_INT16 or_abs
INIT_MMX mmx2
-%define ABS2 ABS2_MMX2
+%define ABS2 ABS2_MMXEXT
AC3_MAX_MSB_ABS_INT16 min_max
INIT_XMM sse2
AC3_MAX_MSB_ABS_INT16 min_max
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
-#define AVG_MMX2_OP(a,b,temp, size) \
+#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP, mmx2)
-QPEL_CAVS(avg_, AVG_MMX2_OP, mmx2)
+QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmx2)
CAVS_MC(put_, 8, mmx2)
CAVS_MC(put_, 16,mmx2)
"packuswb %%mm5, %%mm5 \n\t" \
OP(%%mm5, out, %%mm7, d)
-#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW) \
+#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMXEXT, OP_3DNOW) \
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, \
uint8_t *src, \
int dstStride, \
"psraw $5, %%mm3 \n\t" \
"movq %5, %%mm1 \n\t" \
"packuswb %%mm3, %%mm1 \n\t" \
- OP_MMX2(%%mm1, (%1), %%mm4, q) \
+ OP_MMXEXT(%%mm1, (%1), %%mm4, q) \
/* mm0 = GHIJ, mm2 = FGHI, mm5 = HIJK, mm6 = IJKL, mm7 = 0 */ \
\
"movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */ \
"paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */ \
"psraw $5, %%mm4 \n\t" \
"packuswb %%mm4, %%mm0 \n\t" \
- OP_MMX2(%%mm0, 8(%1), %%mm4, q) \
+ OP_MMXEXT(%%mm0, 8(%1), %%mm4, q) \
\
"add %3, %0 \n\t" \
"add %4, %1 \n\t" \
"paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */ \
"psraw $5, %%mm3 \n\t" \
"packuswb %%mm3, %%mm0 \n\t" \
- OP_MMX2(%%mm0, (%1), %%mm4, q) \
+ OP_MMXEXT(%%mm0, (%1), %%mm4, q) \
\
"add %3, %0 \n\t" \
"add %4, %1 \n\t" \
"pavgusb "#temp", "#a" \n\t" \
"mov"#size" "#a", "#b" \n\t"
-#define AVG_MMX2_OP(a, b, temp, size) \
+#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov"#size" "#b", "#temp" \n\t" \
"pavgb "#temp", "#a" \n\t" \
"mov"#size" "#a", "#b" \n\t"
QPEL_BASE(put_, ff_pw_16, _, PUT_OP, PUT_OP)
-QPEL_BASE(avg_, ff_pw_16, _, AVG_MMX2_OP, AVG_3DNOW_OP)
+QPEL_BASE(avg_, ff_pw_16, _, AVG_MMXEXT_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_, ff_pw_16, _, PUT_OP, 3dnow)
QPEL_OP(avg_, ff_pw_16, _, AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_, ff_pw_16, _, PUT_OP, mmx2)
-QPEL_OP(avg_, ff_pw_16, _, AVG_MMX2_OP, mmx2)
+QPEL_OP(avg_, ff_pw_16, _, AVG_MMXEXT_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
/***********************************/
movd %3, %1
%endmacro
-%macro HSUM_MMX2 3
+%macro HSUM_MMXEXT 3
pshufw %2, %1, 0xE
paddusw %1, %2
pshufw %2, %1, 0x1
%define HSUM HSUM_MMX
HADAMARD8_DIFF_MMX mmx
-%define ABS1 ABS1_MMX2
-%define HSUM HSUM_MMX2
+%define ABS1 ABS1_MMXEXT
+%define HSUM HSUM_MMXEXT
HADAMARD8_DIFF_MMX mmx2
INIT_XMM
-%define ABS2 ABS2_MMX2
+%define ABS2 ABS2_MMXEXT
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
"pxor " #z ", " #a " \n\t"\
"psubw " #z ", " #a " \n\t"
-#define MMABS_MMX2(a,z)\
+#define MMABS_MMXEXT(a, z) \
"pxor " #z ", " #z " \n\t"\
"psubw " #a ", " #z " \n\t"\
"pmaxsw " #z ", " #a " \n\t"
"paddusw "#t", "#a" \n\t"\
"movd "#a", "#dst" \n\t"\
-#define HSUM_MMX2(a, t, dst)\
+#define HSUM_MMXEXT(a, t, dst) \
"pshufw $0x0E, "#a", "#t" \n\t"\
"paddusw "#t", "#a" \n\t"\
"pshufw $0x01, "#a", "#t" \n\t"\
#undef MMABS
#undef HSUM
-#define HSUM(a,t,dst) HSUM_MMX2(a,t,dst)
-#define MMABS(a,z) MMABS_MMX2(a,z)
+#define HSUM(a,t,dst) HSUM_MMXEXT(a,t,dst)
+#define MMABS(a,z) MMABS_MMXEXT(a,z)
DCT_SAD_FUNC(mmx2)
#undef HSUM
#undef DCT_SAD
IDCT8_ADD_SSE r0, r1, r2, r3
RET
-%macro DC_ADD_MMX2_INIT 2-3
+%macro DC_ADD_MMXEXT_INIT 2-3
%if %0 == 2
movsx %1, word [%1]
add %1, 32
packuswb m1, m1
%endmacro
-%macro DC_ADD_MMX2_OP 4
+%macro DC_ADD_MMXEXT_OP 4
%1 m2, [%2 ]
%1 m3, [%2+%3 ]
%1 m4, [%2+%3*2]
INIT_MMX
; ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP movh, r0, r2, r1
+ DC_ADD_MMXEXT_INIT r1, r2
+ DC_ADD_MMXEXT_OP movh, r0, r2, r1
RET
; ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP mova, r0, r2, r1
+ DC_ADD_MMXEXT_INIT r1, r2
+ DC_ADD_MMXEXT_OP mova, r0, r2, r1
lea r0, [r0+r2*4]
- DC_ADD_MMX2_OP mova, r0, r2, r1
+ DC_ADD_MMXEXT_OP mova, r0, r2, r1
RET
; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
movsx r6, word [r2]
test r6, r6
jz .no_dc
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
lea dst2q, [r0+dst2q]
- DC_ADD_MMX2_OP movh, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
movsx r6, word [r2]
test r6, r6
jz .skipblock
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
add dst2q, r0
- DC_ADD_MMX2_OP movh, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
movsx r6, word [r2]
test r6, r6
jz .no_dc
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
lea dst2q, [r0+dst2q]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
lea dst2q, [dst2q+r3*4]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
test r6, r6
jz .no_dc
INIT_MMX
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
add dst2q, r0
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
lea dst2q, [dst2q+r3*4]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
movsx r6, word [r2]
test r6, r6
jz .skipblock
- DC_ADD_MMX2_INIT r2, r3, r6
+ DC_ADD_MMXEXT_INIT r2, r3, r6
%if ARCH_X86_64
mov r0d, dword [r1+r5*4]
add r0, [dst2q]
mov r0, [r0]
add r0, dword [r1+r5*4]
%endif
- DC_ADD_MMX2_OP movh, r0, r3, r6
+ DC_ADD_MMXEXT_OP movh, r0, r3, r6
.skipblock:
inc r5
add r2, 32
pshufw m1, m0, 0xFA ; -d-d-d-d-D-D-D-D
punpcklwd m0, m0 ; d d d d D D D D
lea r6, [r3*3]
- DC_ADD_MMX2_OP movq, r0, r3, r6
+ DC_ADD_MMXEXT_OP movq, r0, r3, r6
ret
ALIGN 16
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_, PUT_OP, mmx2)
-QPEL_H264(avg_, AVG_MMX2_OP, mmx2)
+QPEL_H264(avg_, AVG_MMXEXT_OP, mmx2)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
-QPEL_H264_V_XMM(avg_, AVG_MMX2_OP, sse2)
+QPEL_H264_V_XMM(avg_, AVG_MMXEXT_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
-QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, sse2)
+QPEL_H264_HV_XMM(avg_, AVG_MMXEXT_OP, sse2)
#if HAVE_SSSE3_INLINE
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
-QPEL_H264_H_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_H_XMM(avg_, AVG_MMXEXT_OP, ssse3)
QPEL_H264_HV2_XMM(put_, PUT_OP, ssse3)
-QPEL_H264_HV2_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_HV2_XMM(avg_, AVG_MMXEXT_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
-QPEL_H264_HV_XMM(avg_, AVG_MMX2_OP, ssse3)
+QPEL_H264_HV_XMM(avg_, AVG_MMXEXT_OP, ssse3)
#endif
#undef PAVGB
RET
%endmacro
-%define PABSW PABSW_MMX2
+%define PABSW PABSW_MMXEXT
VC1_LF_MMX mmx2
INIT_XMM
psubw %1, %2
%endmacro
-%macro PABSW_MMX2 2
+%macro PABSW_MMXEXT 2
pxor %1, %1
psubw %1, %2
pmaxsw %1, %2
psubw %2, %4
%endmacro
-%macro ABS1_MMX2 2 ; a, tmp
+%macro ABS1_MMXEXT 2 ; a, tmp
pxor %2, %2
psubw %2, %1
pmaxsw %1, %2
%endmacro
-%macro ABS2_MMX2 4 ; a, b, tmp0, tmp1
+%macro ABS2_MMXEXT 4 ; a, b, tmp0, tmp1
pxor %3, %3
pxor %4, %4
psubw %3, %1
"cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
-#define WRITEBGR24MMX2(dst, dstw, index) \
+#define WRITEBGR24MMXEXT(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
"movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
#if COMPILE_TEMPLATE_MMXEXT
#undef WRITEBGR24
-#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMXEXT(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
PREFETCH" 64(%%"REG_c") \n\t"
#if ARCH_X86_64
-#define CALL_MMX2_FILTER_CODE \
+#define CALL_MMXEXT_FILTER_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#else
-#define CALL_MMX2_FILTER_CODE \
+#define CALL_MMXEXT_FILTER_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
#endif /* ARCH_X86_64 */
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
#if defined(PIC)
"mov %5, %%"REG_b" \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
"xor %%"REG_a", %%"REG_a" \n\t" // i
"mov %5, %%"REG_c" \n\t" // src
"mov %6, %%"REG_D" \n\t" // buf2
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
- CALL_MMX2_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
+ CALL_MMXEXT_FILTER_CODE
#if defined(PIC)
"mov %7, %%"REG_b" \n\t"