SECTION_RODATA
-; FIXME this table is a duplicate from h264data.h, and will be removed once the tables from, h264 have been split
scan8_mem: db 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
db 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
db 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
paddw m0, m6
IDCT4_1D w, 0, 1, 2, 3, 4, 5
pxor m7, m7
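+ ; zero the 4x4 coefficient block (32 bytes) now that the IDCT has consumed it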
+ movq [%2+ 0], m7
+ movq [%2+ 8], m7
+ movq [%2+16], m7
+ movq [%2+24], m7
STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3
lea %1, [%1+%3*2]
STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3
%endmacro
-INIT_MMX
-; ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
-cglobal h264_idct_add_8_mmx, 3, 3, 0
+INIT_MMX mmx
+; void ff_h264_idct_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
+cglobal h264_idct_add_8, 3, 3, 0
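+ ; movsxdifnidn: sign-extend the 32-bit int stride to a full register on 64-bit ABIs (no-op on x86-32)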
+ movsxdifnidn r2, r2d
IDCT4_ADD r0, r1, r2
RET
%endmacro
-; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
+; %1=uint8_t *dst, %2=int16_t *block, %3=int stride, %4=int16_t *block to clear (optional)
-%macro IDCT8_ADD_MMX_END 3
+%macro IDCT8_ADD_MMX_END 3-4
IDCT8_1D_FULL %2
mova [%2 ], m5
mova [%2+16], m6
mova [%2+32], m7
pxor m7, m7
+%if %0 == 4
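+ ; when called with a 4th argument, also zero the whole 8x8 block (128 bytes) at %4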
+ movq [%4+  0], m7
+ movq [%4+  8], m7
+ movq [%4+ 16], m7
+ movq [%4+ 24], m7
+ movq [%4+ 32], m7
+ movq [%4+ 40], m7
+ movq [%4+ 48], m7
+ movq [%4+ 56], m7
+ movq [%4+ 64], m7
+ movq [%4+ 72], m7
+ movq [%4+ 80], m7
+ movq [%4+ 88], m7
+ movq [%4+ 96], m7
+ movq [%4+104], m7
+ movq [%4+112], m7
+ movq [%4+120], m7
+%endif
STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
lea %1, [%1+%3*2]
STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3
%endmacro
-INIT_MMX
-; ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
-cglobal h264_idct8_add_8_mmx, 3, 4, 0
+INIT_MMX mmx
+; void ff_h264_idct8_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
+cglobal h264_idct8_add_8, 3, 4, 0
+ movsxdifnidn r2, r2d
%assign pad 128+4-(stack_offset&7)
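+ ; carve out a 128-byte stack scratch buffer, padded so it stays 8-byte aligned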
SUB rsp, pad
IDCT8_ADD_MMX_START r1 , rsp
IDCT8_ADD_MMX_START r1+8, rsp+64
lea r3, [r0+4]
- IDCT8_ADD_MMX_END r0 , rsp, r2
+ IDCT8_ADD_MMX_END r0 , rsp, r2, r1
IDCT8_ADD_MMX_END r3 , rsp+8, r2
ADD rsp, pad
SWAP 0, 8
SWAP 1, 9
%endif
+ mova [%2+  0], m7
+ mova [%2+ 16], m7
+ mova [%2+ 32], m7
+ mova [%2+ 48], m7
+ mova [%2+ 64], m7
+ mova [%2+ 80], m7
+ mova [%2+ 96], m7
+ mova [%2+112], m7
lea %1, [%1+%3*4]
STORE_DIFF m4, m6, m7, [%1 ]
STORE_DIFF m5, m6, m7, [%1+%3 ]
STORE_DIFF m1, m6, m7, [%1+%4 ]
%endmacro
-INIT_XMM
-; ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
-cglobal h264_idct8_add_8_sse2, 3, 4, 10
+INIT_XMM sse2
+; void ff_h264_idct8_add_8_sse2(uint8_t *dst, int16_t *block, int stride)
+cglobal h264_idct8_add_8, 3, 4, 10
+ movsxdifnidn r2, r2d
IDCT8_ADD_SSE r0, r1, r2, r3
RET
-%macro DC_ADD_MMX2_INIT 2-3
-%if %0 == 2
- movsx %1, word [%1]
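+; %1 = DC coefficient (clobbered: exits holding 3*%2), %2 = stride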
+%macro DC_ADD_MMXEXT_INIT 2
add %1, 32
sar %1, 6
movd m0, %1d
lea %1, [%2*3]
-%else
- add %3, 32
- sar %3, 6
- movd m0, %3d
- lea %3, [%2*3]
-%endif
pshufw m0, m0, 0
pxor m1, m1
psubw m1, m0
packuswb m1, m1
%endmacro
-%macro DC_ADD_MMX2_OP 4
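+; %1 = load/store mnemonic (movh/mova/movq), %2 = dst, %3 = stride, %4 = 3*stride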
+%macro DC_ADD_MMXEXT_OP 4
%1 m2, [%2 ]
%1 m3, [%2+%3 ]
%1 m4, [%2+%3*2]
%1 [%2+%4 ], m5
%endmacro
-INIT_MMX
-; ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
-cglobal h264_idct_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP movh, r0, r2, r1
+INIT_MMX mmxext
+; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
+%if ARCH_X86_64
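+; 64-bit: a fourth register holds the DC value, which is cleared in memory once read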
+cglobal h264_idct_dc_add_8, 3, 4, 0
+ movsxd r2, r2d
+ movsx r3, word [r1]
+ mov dword [r1], 0
+ DC_ADD_MMXEXT_INIT r3, r2
+ DC_ADD_MMXEXT_OP movh, r0, r2, r3
RET
-; ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
-cglobal h264_idct8_dc_add_8_mmx2, 3, 3, 0
- DC_ADD_MMX2_INIT r1, r2
- DC_ADD_MMX2_OP mova, r0, r2, r1
+; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
+cglobal h264_idct8_dc_add_8, 3, 4, 0
+ movsxd r2, r2d
+ movsx r3, word [r1]
+ mov dword [r1], 0
+ DC_ADD_MMXEXT_INIT r3, r2
+ DC_ADD_MMXEXT_OP mova, r0, r2, r3
lea r0, [r0+r2*4]
- DC_ADD_MMX2_OP mova, r0, r2, r1
+ DC_ADD_MMXEXT_OP mova, r0, r2, r3
+ RET
+%else
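+; 32-bit: only three registers, so the stride is reloaded from the stack after the DC is read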
+; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
+cglobal h264_idct_dc_add_8, 2, 3, 0
+ movsx r2, word [r1]
+ mov dword [r1], 0
+ mov r1, r2m
+ DC_ADD_MMXEXT_INIT r2, r1
+ DC_ADD_MMXEXT_OP movh, r0, r1, r2
RET
-; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add16_8_mmx, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
+; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
+cglobal h264_idct8_dc_add_8, 2, 3, 0
+ movsx r2, word [r1]
+ mov dword [r1], 0
+ mov r1, r2m
+ DC_ADD_MMXEXT_INIT r2, r1
+ DC_ADD_MMXEXT_OP mova, r0, r1, r2
+ lea r0, [r0+r1*4]
+ DC_ADD_MMXEXT_OP mova, r0, r1, r2
+ RET
+%endif
+
+INIT_MMX mmx
+; void ff_h264_idct_add16_8_mmx(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
+ movsxdifnidn r3, r3d
xor r5, r5
%ifdef PIC
lea picregq, [scan8_mem]
jl .nextblock
REP_RET
-; ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct8_add4_8_mmx, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
+; void ff_h264_idct8_add4_8_mmx(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
+ movsxdifnidn r3, r3d
%assign pad 128+4-(stack_offset&7)
SUB rsp, pad
add word [r2], 32
IDCT8_ADD_MMX_START r2 , rsp
IDCT8_ADD_MMX_START r2+8, rsp+64
- IDCT8_ADD_MMX_END r6 , rsp, r3
+ IDCT8_ADD_MMX_END r6 , rsp, r3, r2
mov r6d, dword [r1+r5*4]
lea r6, [r0+r6+4]
IDCT8_ADD_MMX_END r6 , rsp+8, r3
ADD rsp, pad
RET
-; ff_h264_idct_add16_mmx2(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add16_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+INIT_MMX mmxext
+; void ff_h264_idct_add16_8_mmxext(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+ movsxdifnidn r3, r3d
xor r5, r5
%ifdef PIC
lea picregq, [scan8_mem]
movsx r6, word [r2]
test r6, r6
jz .no_dc
- DC_ADD_MMX2_INIT r2, r3, r6
+ mov word [r2], 0
+ DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
lea dst2q, [r0+dst2q]
- DC_ADD_MMX2_OP movh, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
jl .nextblock
REP_RET
-; ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add16intra_8_mmx, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
+INIT_MMX mmx
+; void ff_h264_idct_add16intra_8_mmx(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
+ movsxdifnidn r3, r3d
xor r5, r5
%ifdef PIC
lea picregq, [scan8_mem]
jl .nextblock
REP_RET
-; ff_h264_idct_add16intra_mmx2(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add16intra_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+INIT_MMX mmxext
+; void ff_h264_idct_add16intra_8_mmxext(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+ movsxdifnidn r3, r3d
xor r5, r5
%ifdef PIC
lea picregq, [scan8_mem]
movsx r6, word [r2]
test r6, r6
jz .skipblock
- DC_ADD_MMX2_INIT r2, r3, r6
+ mov word [r2], 0
+ DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
add dst2q, r0
- DC_ADD_MMX2_OP movh, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
jl .nextblock
REP_RET
-; ff_h264_idct8_add4_mmx2(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct8_add4_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+; void ff_h264_idct8_add4_8_mmxext(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+ movsxdifnidn r3, r3d
%assign pad 128+4-(stack_offset&7)
SUB rsp, pad
movsx r6, word [r2]
test r6, r6
jz .no_dc
- DC_ADD_MMX2_INIT r2, r3, r6
+ mov word [r2], 0
+ DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
lea dst2q, [r0+dst2q]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
lea dst2q, [dst2q+r3*4]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
add word [r2], 32
IDCT8_ADD_MMX_START r2 , rsp
IDCT8_ADD_MMX_START r2+8, rsp+64
- IDCT8_ADD_MMX_END r6 , rsp, r3
+ IDCT8_ADD_MMX_END r6 , rsp, r3, r2
mov r6d, dword [r1+r5*4]
lea r6, [r0+r6+4]
IDCT8_ADD_MMX_END r6 , rsp+8, r3
ADD rsp, pad
RET
-INIT_XMM
-; ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct8_add4_8_sse2, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+INIT_XMM sse2
+; void ff_h264_idct8_add4_8_sse2(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+ movsxdifnidn r3, r3d
xor r5, r5
%ifdef PIC
lea picregq, [scan8_mem]
movsx r6, word [r2]
test r6, r6
jz .no_dc
-INIT_MMX
- DC_ADD_MMX2_INIT r2, r3, r6
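+; switch to MMX registers for the DC-only path; "cpuname" preserves the current cpu suffix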
+INIT_MMX cpuname
+ mov word [r2], 0
+ DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64 == 0
%define dst2q r1
%define dst2d r1d
%endif
mov dst2d, dword [r1+r5*4]
add dst2q, r0
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
lea dst2q, [dst2q+r3*4]
- DC_ADD_MMX2_OP mova, dst2q, r3, r6
+ DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
%if ARCH_X86_64 == 0
mov r1, r1m
%endif
jl .nextblock
REP_RET
.no_dc:
-INIT_XMM
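+; back to XMM registers for the full 8x8 IDCT path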
+INIT_XMM cpuname
mov dst2d, dword [r1+r5*4]
add dst2q, r0
IDCT8_ADD_SSE dst2q, r2, r3, r6
jl .nextblock
REP_RET
-INIT_MMX
+INIT_MMX mmx
h264_idct_add8_mmx_plane:
+ movsxdifnidn r3, r3d
.nextblock:
movzx r6, byte [scan8+r5]
movzx r6, byte [r4+r6]
jnz .nextblock
rep ret
-; ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add8_8_mmx, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+; void ff_h264_idct_add8_8_mmx(uint8_t **dest, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+ movsxdifnidn r3, r3d
mov r5, 16
add r2, 512
%ifdef PIC
call h264_idct_add8_mmx_plane
RET
-h264_idct_add8_mmx2_plane:
+h264_idct_add8_mmxext_plane:
+ movsxdifnidn r3, r3d
.nextblock:
movzx r6, byte [scan8+r5]
movzx r6, byte [r4+r6]
movsx r6, word [r2]
test r6, r6
jz .skipblock
- DC_ADD_MMX2_INIT r2, r3, r6
+ mov word [r2], 0
+ DC_ADD_MMXEXT_INIT r6, r3
%if ARCH_X86_64
mov r0d, dword [r1+r5*4]
add r0, [dst2q]
mov r0, [r0]
add r0, dword [r1+r5*4]
%endif
- DC_ADD_MMX2_OP movh, r0, r3, r6
+ DC_ADD_MMXEXT_OP movh, r0, r3, r6
.skipblock:
inc r5
add r2, 32
jnz .nextblock
rep ret
-; ff_h264_idct_add8_mmx2(uint8_t **dest, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add8_8_mmx2, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+INIT_MMX mmxext
+; void ff_h264_idct_add8_8_mmxext(uint8_t **dest, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
+ movsxdifnidn r3, r3d
mov r5, 16
add r2, 512
%if ARCH_X86_64
%ifdef PIC
lea picregq, [scan8_mem]
%endif
- call h264_idct_add8_mmx2_plane
+ call h264_idct_add8_mmxext_plane
mov r5, 32
add r2, 384
%if ARCH_X86_64
%else
add r0mp, gprsize
%endif
- call h264_idct_add8_mmx2_plane
+ call h264_idct_add8_mmxext_plane
RET
-INIT_MMX
; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6=clobbered
-h264_idct_dc_add8_mmx2:
+h264_idct_dc_add8_mmxext:
+ movsxdifnidn r3, r3d
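+ ; load the DC of each 4x4 block and clear it in memory straight away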
movd m0, [r2 ] ; 0 0 X D
+ mov word [r2+ 0], 0
punpcklwd m0, [r2+32] ; x X d D
+ mov word [r2+32], 0
paddsw m0, [pw_32]
psraw m0, 6
punpcklwd m0, m0 ; d d D D
pshufw m1, m0, 0xFA ; -d-d-d-d-D-D-D-D
punpcklwd m0, m0 ; d d d d D D D D
lea r6, [r3*3]
- DC_ADD_MMX2_OP movq, r0, r3, r6
+ DC_ADD_MMXEXT_OP movq, r0, r3, r6
ret
ALIGN 16
-INIT_XMM
+INIT_XMM sse2
; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
h264_add8x4_idct_sse2:
+ movsxdifnidn r3, r3d
movq m0, [r2+ 0]
movq m1, [r2+ 8]
movq m2, [r2+16]
paddw m0, [pw_32]
IDCT4_1D w,0,1,2,3,4,5
pxor m7, m7
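+ ; zero both 4x4 coefficient blocks (64 bytes) now that they have been loaded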
+ mova [r2+ 0], m7
+ mova [r2+16], m7
+ mova [r2+32], m7
+ mova [r2+48], m7
STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
lea r0, [r0+r3*2]
STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3
%endif
%endmacro
-; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add16_8_sse2, 5, 5 + ARCH_X86_64, 8
+; void ff_h264_idct_add16_8_sse2(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
+ movsxdifnidn r3, r3d
%if ARCH_X86_64
mov r5, r0
%endif
%else
add r0, r0m
%endif
- call h264_idct_dc_add8_mmx2
+ call h264_idct_dc_add8_mmxext
.cycle%1end:
%if %1 < 7
add r2, 64
%endif
%endmacro
-; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add16intra_8_sse2, 5, 7 + ARCH_X86_64, 8
+; void ff_h264_idct_add16intra_8_sse2(uint8_t *dst, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
+ movsxdifnidn r3, r3d
%if ARCH_X86_64
mov r7, r0
%endif
mov r0, [r0]
add r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
%endif
- call h264_idct_dc_add8_mmx2
+ call h264_idct_dc_add8_mmxext
.cycle%1end:
%if %1 == 1
add r2, 384+64
%endif
%endmacro
-; ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset,
-; DCTELEM *block, int stride, const uint8_t nnzc[6*8])
-cglobal h264_idct_add8_8_sse2, 5, 7 + ARCH_X86_64, 8
+; void ff_h264_idct_add8_8_sse2(uint8_t **dest, const int *block_offset,
+; int16_t *block, int stride,
+; const uint8_t nnzc[6 * 8])
+cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
+ movsxdifnidn r3, r3d
add r2, 512
%if ARCH_X86_64
mov r7, r0
add8_sse2_cycle 3, 0x64
RET
-;void ff_h264_luma_dc_dequant_idct_mmx(DCTELEM *output, DCTELEM *input, int qmul)
+; void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)
%macro WALSH4_1D 5
SUMSUB_BADC w, %4, %3, %2, %1, %5
packssdw %2, m5
%endmacro
-%macro STORE_WORDS_MMX 5
- movd t0d, %1
- psrlq %1, 32
- movd t1d, %1
- mov [t2+%2*32], t0w
- mov [t2+%4*32], t1w
- shr t0d, 16
- shr t1d, 16
- mov [t2+%3*32], t0w
- mov [t2+%5*32], t1w
-%endmacro
-
-%macro DEQUANT_STORE_MMX 1
- DEQUANT_MMX m0, m1, %1
- STORE_WORDS_MMX m0, 0, 1, 4, 5
- STORE_WORDS_MMX m1, 2, 3, 6, 7
-
- DEQUANT_MMX m2, m3, %1
- STORE_WORDS_MMX m2, 8, 9, 12, 13
- STORE_WORDS_MMX m3, 10, 11, 14, 15
-%endmacro
-
-%macro STORE_WORDS_SSE 9
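+; scatter the words of %1 into [t2+n*32]; the mmx form takes four positions, the sse form eight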
+%macro STORE_WORDS 5-9
+%if cpuflag(sse)
movd t0d, %1
psrldq %1, 4
movd t1d, %1
shr t1d, 16
mov [t2+%7*32], t0w
mov [t2+%9*32], t1w
+%else
+ movd t0d, %1
+ psrlq %1, 32
+ movd t1d, %1
+ mov [t2+%2*32], t0w
+ mov [t2+%4*32], t1w
+ shr t0d, 16
+ shr t1d, 16
+ mov [t2+%3*32], t0w
+ mov [t2+%5*32], t1w
+%endif
%endmacro
-%macro DEQUANT_STORE_SSE2 1
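+; %1 = dequant shift amount: the immediate 8 normally, an (x)mm register on the .big_qmul path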
+%macro DEQUANT_STORE 1
+%if cpuflag(sse2)
movd xmm4, t3d
movq xmm5, [pw_1]
pshufd xmm4, xmm4, 0
psrad xmm3, %1
packssdw xmm0, xmm1
packssdw xmm2, xmm3
- STORE_WORDS_SSE xmm0, 0, 1, 4, 5, 2, 3, 6, 7
- STORE_WORDS_SSE xmm2, 8, 9, 12, 13, 10, 11, 14, 15
+ STORE_WORDS xmm0, 0, 1, 4, 5, 2, 3, 6, 7
+ STORE_WORDS xmm2, 8, 9, 12, 13, 10, 11, 14, 15
+%else
+ DEQUANT_MMX m0, m1, %1
+ STORE_WORDS m0, 0, 1, 4, 5
+ STORE_WORDS m1, 2, 3, 6, 7
+
+ DEQUANT_MMX m2, m3, %1
+ STORE_WORDS m2, 8, 9, 12, 13
+ STORE_WORDS m3, 10, 11, 14, 15
+%endif
%endmacro
-%macro IDCT_DC_DEQUANT 2
-cglobal h264_luma_dc_dequant_idct_%1, 3,4,%2
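+; %1 = number of xmm registers to reserve (0 for the mmx version, 7 for sse2)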
+%macro IDCT_DC_DEQUANT 1
+cglobal h264_luma_dc_dequant_idct, 3, 4, %1
; manually spill XMM registers for Win64 because
; the code here is initialized with INIT_MMX
- WIN64_SPILL_XMM %2
+ WIN64_SPILL_XMM %1
movq m3, [r1+24]
movq m2, [r1+16]
movq m1, [r1+ 8]
cmp t3d, 32767
jg .big_qmul
add t3d, 128 << 16
-%ifidn %1,mmx
- DEQUANT_STORE_MMX 8
-%else
- DEQUANT_STORE_SSE2 8
-%endif
+ DEQUANT_STORE 8
RET
.big_qmul:
bsr t0d, t3d
inc t1d
shr t3d, t0b
sub t1d, t0d
-%ifidn %1,mmx
- movd m6, t1d
- DEQUANT_STORE_MMX m6
-%else
+%if cpuflag(sse2)
movd xmm6, t1d
- DEQUANT_STORE_SSE2 xmm6
+ DEQUANT_STORE xmm6
+%else
+ movd m6, t1d
+ DEQUANT_STORE m6
%endif
RET
%endmacro
-INIT_MMX
-IDCT_DC_DEQUANT mmx, 0
-IDCT_DC_DEQUANT sse2, 7
+INIT_MMX mmx
+IDCT_DC_DEQUANT 0
+INIT_MMX sse2
+IDCT_DC_DEQUANT 7