;*****************************************************************************
;* MMX/SSE2-optimized H.264 iDCT
;*****************************************************************************
;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <hal@duncan.ol.sub.de>
;*          Min Chen <chenm001@163.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "libavutil/x86/x86util.asm"
scan8_mem: db  4+ 1*8,  5+ 1*8,  4+ 2*8,  5+ 2*8
           db  6+ 1*8,  7+ 1*8,  6+ 2*8,  7+ 2*8
           db  4+ 3*8,  5+ 3*8,  4+ 4*8,  5+ 4*8
           db  6+ 3*8,  7+ 3*8,  6+ 4*8,  7+ 4*8
           db  4+ 6*8,  5+ 6*8,  4+ 7*8,  5+ 7*8
           db  6+ 6*8,  7+ 6*8,  6+ 7*8,  7+ 7*8
           db  4+ 8*8,  5+ 8*8,  4+ 9*8,  5+ 9*8
           db  6+ 8*8,  7+ 8*8,  6+ 9*8,  7+ 9*8
           db  4+11*8,  5+11*8,  4+12*8,  5+12*8
           db  6+11*8,  7+11*8,  6+12*8,  7+12*8
           db  4+13*8,  5+13*8,  4+14*8,  5+14*8
           db  6+13*8,  7+13*8,  6+14*8,  7+14*8

%define scan8 scan8_mem
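
; scan8 maps a 4x4-block index (0-15 luma, then the chroma blocks) to its
; slot in the decoder's non-zero-count cache, which is laid out 8 entries
; per row; the functions below test nnzc[scan8[i]] to skip all-zero blocks.
; This mirrors the scan8[] table in the C decoder.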
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3
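
; As a sketch, each IDCT4_1D pass applies the H.264 4x4 inverse-transform
; butterfly to four coefficients in0..in3:
;     z0 = in0 + in2            z1 = in0 - in2
;     z2 = (in1 >> 1) - in3     z3 = in1 + (in3 >> 1)
;     out0 = z0 + z3            out1 = z1 + z2
;     out2 = z1 - z2            out3 = z0 - z3
; The transpose between the two passes turns the column pass into the row
; pass; the +32 rounding bias is folded in before the second pass, and
; STORE_DIFFx2 shifts right by 6 and adds the result to the destination
; pixels with unsigned saturation.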
; void ff_h264_idct_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_add_8, 3, 3, 0
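; (cglobal's numeric arguments follow x86inc: number of function arguments,
; number of GPRs used, number of XMM registers used.)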
    SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
%macro IDCT8_1D_FULL 1
    IDCT8_1D   [%1], [%1+ 64]
; %1=int16_t *block, %2=int16_t *dstblock
%macro IDCT8_ADD_MMX_START 2
    TRANSPOSE4x4W 0, 1, 2, 3, 7
    TRANSPOSE4x4W 4, 5, 6, 7, 3
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_MMX_END 3-4
    STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3
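
; MMX registers hold only four 16-bit coefficients, so the 8x8 transform is
; done in two 8x4 halves: IDCT8_ADD_MMX_START runs the first 1-D pass and
; transposes into a stack buffer, and IDCT8_ADD_MMX_END runs the second
; pass on one half and adds the result to the destination.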
; void ff_h264_idct8_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 0
%assign pad 128+4-(stack_offset&7)
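; pad reserves the 128-byte buffer for the two transposed halves; the
; +4-(stack_offset&7) term (stack_offset is x86inc's running count of
; pushed bytes) keeps the buffer 8-byte aligned after the stack is lowered.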
    IDCT8_ADD_MMX_START r1  , rsp
    IDCT8_ADD_MMX_START r1+8, rsp+64
    IDCT8_ADD_MMX_END   r0  , rsp, r2, r1
    IDCT8_ADD_MMX_END   r3  , rsp+8, r2
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE 4
%if ARCH_X86_64
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
%endif
    IDCT8_1D   [%2], [%2+ 16]

    STORE_DIFF  m0, m6, m7, [%1     ]
    STORE_DIFF  m1, m6, m7, [%1+%3  ]
    STORE_DIFF  m2, m6, m7, [%1+%3*2]
    STORE_DIFF  m3, m6, m7, [%1+%4  ]

    STORE_DIFF  m4, m6, m7, [%1     ]
    STORE_DIFF  m5, m6, m7, [%1+%3  ]
    STORE_DIFF  m0, m6, m7, [%1+%3*2]
    STORE_DIFF  m1, m6, m7, [%1+%4  ]
; void ff_h264_idct8_add_8_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10
    IDCT8_ADD_SSE r0, r1, r2, r3
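
; With SSE2 a full 8-coefficient row fits in one XMM register, so the 8x8
; transform runs in a single set of passes; on x86_64 the transpose can use
; a spare register (hence the 10 XMM registers requested above) instead of
; memory scratch space.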
%macro DC_ADD_MMXEXT_INIT 2

%macro DC_ADD_MMXEXT_OP 4
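
; DC-only fast path (sketch): DC_ADD_MMXEXT_INIT computes
;     dc = (block[0] + 32) >> 6
; and broadcasts dc and -dc as packed bytes; DC_ADD_MMXEXT_OP then applies
; dst = clip_uint8(dst + dc) to four rows at a time with saturating
; paddusb/psubusb, avoiding any unpack to 16 bits.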
; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
%if ARCH_X86_64
cglobal h264_idct_dc_add_8, 3, 4, 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP movh, r0, r2, r3

; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 3, 4, 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP mova, r0, r2, r3
    DC_ADD_MMXEXT_OP mova, r0, r2, r3
%else ; X86_32
; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8, 2, 3, 0
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP movh, r0, r1, r2

; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 2, 3, 0
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP mova, r0, r1, r2
    DC_ADD_MMXEXT_OP mova, r0, r1, r2
%endif
; void ff_h264_idct_add16_8_mmx(uint8_t *dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]
    mov           r6d, dword [r1+r5*4]
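
; Loop skeleton for the add16/add4 variants: r5 counts blocks,
; nnzc[scan8[r5]] is tested to skip empty blocks, and block_offset[r5]
; (loaded into r6d above) is added to dst to locate the block's pixels
; before the per-block iDCT-and-add is invoked.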
; void ff_h264_idct8_add4_8_mmx(uint8_t *dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
%assign pad 128+4-(stack_offset&7)

    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]
    mov           r6d, dword [r1+r5*4]

    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp, r3, r2
    mov           r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3
; void ff_h264_idct_add16_8_mmxext(uint8_t *dst, const int *block_offset,
;                                  int16_t *block, int stride,
;                                  const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]

    DC_ADD_MMXEXT_INIT r6, r3
    mov         dst2d, dword [r1+r5*4]
    lea         dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6

    mov           r6d, dword [r1+r5*4]
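
; The mmxext variant adds a fast path: when nnzc[scan8[i]] indicates a
; DC-only block (count of 1) and the DC coefficient is nonzero, it takes
; the cheap DC_ADD route above instead of the full 4x4 iDCT.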
; void ff_h264_idct_add16intra_8_mmx(uint8_t *dst, const int *block_offset,
;                                    int16_t *block, int stride,
;                                    const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]
    mov           r6d, dword [r1+r5*4]
; void ff_h264_idct_add16intra_8_mmxext(uint8_t *dst, const int *block_offset,
;                                       int16_t *block, int stride,
;                                       const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]
    mov           r6d, dword [r1+r5*4]

    DC_ADD_MMXEXT_INIT r6, r3
    mov         dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
; void ff_h264_idct8_add4_8_mmxext(uint8_t *dst, const int *block_offset,
;                                  int16_t *block, int stride,
;                                  const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
%assign pad 128+4-(stack_offset&7)

    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]

    DC_ADD_MMXEXT_INIT r6, r3
    mov         dst2d, dword [r1+r5*4]
    lea         dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea         dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6

    mov           r6d, dword [r1+r5*4]

    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp, r3, r2
    mov           r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3
; void ff_h264_idct8_add4_8_sse2(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea       picregq, [scan8_mem]
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]

    DC_ADD_MMXEXT_INIT r6, r3
    mov         dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea         dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6

    mov         dst2d, dword [r1+r5*4]
    IDCT8_ADD_SSE dst2q, r2, r3, r6
h264_idct_add8_mmx_plane:
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]
    mov           r0d, dword [r1+r5*4]
    mov            r0, r1m ; XXX r1m here is actually r0m of the calling func
    add            r0, dword [r1+r5*4]
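
; Why r1m reads the caller's r0m: this helper is entered via call, so the
; return address shifts x86inc's view of the stack arguments by one slot.
; The reload is needed because the add8 functions take dest as uint8_t **
; (one pointer per chroma plane), so the plane base must be fetched from
; the caller's dest array before adding block_offset[r5].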
; void ff_h264_idct_add8_8_mmx(uint8_t **dest, const int *block_offset,
;                              int16_t *block, int stride,
;                              const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea       picregq, [scan8_mem]
    call h264_idct_add8_mmx_plane
    call h264_idct_add8_mmx_plane
h264_idct_add8_mmxext_plane:
    movzx          r6, byte [scan8+r5]
    movzx          r6, byte [r4+r6]
    mov           r0d, dword [r1+r5*4]
    mov            r0, r1m ; XXX r1m here is actually r0m of the calling func
    add            r0, dword [r1+r5*4]

    DC_ADD_MMXEXT_INIT r6, r3
    mov           r0d, dword [r1+r5*4]
    mov            r0, r1m ; XXX r1m here is actually r0m of the calling func
    add            r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, r0, r3, r6
; void ff_h264_idct_add8_8_mmxext(uint8_t **dest, const int *block_offset,
;                                 int16_t *block, int stride,
;                                 const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea       picregq, [scan8_mem]
    call h264_idct_add8_mmxext_plane
    call h264_idct_add8_mmxext_plane
; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6 = clobbered
h264_idct_dc_add8_mmxext:
    movd         m0, [r2   ]     ;  0 0 X D
    punpcklwd    m0, [r2+32]     ;  x X d D
    punpcklwd    m0, m0          ;  d d D D
    pxor         m1, m1          ;  0 0 0 0
    psubw        m1, m0          ; -d-d-D-D
    packuswb     m0, m1          ; -d-d-D-D d d D D
    pshufw       m1, m0, 0xFA    ; -d-d-d-d-D-D-D-D
    punpcklwd    m0, m0          ;  d d d d D D D D
    DC_ADD_MMXEXT_OP movq, r0, r3, r6
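
; This helper handles two horizontally adjacent DC-only 4x4 blocks at once:
; D is the first block's DC and d the second's (block[0] and block[32],
; 32 bytes = one coefficient block apart). The shuffles build one register
; of +dc bytes and one of -dc bytes covering the full 8-pixel row, so a
; single movq-wide DC_ADD_MMXEXT_OP updates both blocks per row.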
; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
h264_add8x4_idct_sse2:
    IDCT4_1D w, 0, 1, 2, 3, 4, 5
    TRANSPOSE2x4x4W 0, 1, 2, 3, 4
    IDCT4_1D w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3
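
; Here each 128-bit register holds one row from each of two side-by-side
; 4x4 blocks, so one pass transforms an 8x4 region; TRANSPOSE2x4x4W
; transposes the two 4x4 sub-matrices independently to keep the blocks
; separate between the column and row passes.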
%macro add16_sse2_cycle 2
    movzx          r0, word [r4+%2]
    mov           r0d, dword [r1+%1*8]
    call h264_add8x4_idct_sse2
; void ff_h264_idct_add16_8_sse2(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
    ; unrolling of the loop leads to an average performance gain of
    add16_sse2_cycle 0, 0xc
    add16_sse2_cycle 1, 0x14
    add16_sse2_cycle 2, 0xe
    add16_sse2_cycle 3, 0x16
    add16_sse2_cycle 4, 0x1c
    add16_sse2_cycle 5, 0x24
    add16_sse2_cycle 6, 0x1e
    add16_sse2_cycle 7, 0x26
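
; Each cycle covers a pair of horizontally adjacent 4x4 blocks (one 8x4
; call). The second macro argument is scan8[2*%1] precomputed as a byte
; offset into nnzc (e.g. 0xc = 4+1*8), so the 16-bit movzx above tests the
; non-zero counts of both blocks of the pair in one load.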
%macro add16intra_sse2_cycle 2
    movzx          r0, word [r4+%2]
    mov           r0d, dword [r1+%1*8]
    call h264_add8x4_idct_sse2
    mov           r0d, dword [r1+%1*8]
    call h264_idct_dc_add8_mmxext
; void ff_h264_idct_add16intra_8_sse2(uint8_t *dst, const int *block_offset,
;                                     int16_t *block, int stride,
;                                     const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
    add16intra_sse2_cycle 0, 0xc
    add16intra_sse2_cycle 1, 0x14
    add16intra_sse2_cycle 2, 0xe
    add16intra_sse2_cycle 3, 0x16
    add16intra_sse2_cycle 4, 0x1c
    add16intra_sse2_cycle 5, 0x24
    add16intra_sse2_cycle 6, 0x1e
    add16intra_sse2_cycle 7, 0x26
%macro add8_sse2_cycle 2
    movzx          r0, word [r4+%2]
    mov           r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add            r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call h264_add8x4_idct_sse2
    mov           r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add            r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call h264_idct_dc_add8_mmxext
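
; The block_offset indexing: the low bit of %1 selects the upper or lower
; 8x4 strip within a chroma plane (+8 bytes = 2 int entries), while %1>>1
; selects the plane, U starting at block_offset[16] and V at
; block_offset[32] (64 bytes = 16 int entries apart).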
; void ff_h264_idct_add8_8_sse2(uint8_t **dest, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
    add8_sse2_cycle 0, 0x34
    add8_sse2_cycle 1, 0x3c
    add8_sse2_cycle 2, 0x5c
    add8_sse2_cycle 3, 0x64
; void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)
    SUMSUB_BADC w, %4, %3, %2, %1, %5
    SUMSUB_BADC w, %4, %2, %3, %1, %5
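
; The luma DC transform is a plain 4x4 Hadamard (Walsh) transform: each
; 1-D pass is two SUMSUB_BADC butterfly stages (sums and differences only,
; with no >>1 scaling, unlike the residual iDCT). After both passes, each
; of the 16 DC values is dequantized roughly as in the C reference:
;     output = (x * qmul + 128) >> 8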
%macro STORE_WORDS 5-9

%macro DEQUANT_STORE 1
    pshufd      xmm4, xmm4, 0
    punpcklwd   xmm0, xmm5
    punpcklwd   xmm1, xmm5
    punpcklwd   xmm2, xmm5
    punpcklwd   xmm3, xmm5
    STORE_WORDS xmm0,  0,  1,  4,  5,  2,  3,  6,  7
    STORE_WORDS xmm2,  8,  9, 12, 13, 10, 11, 14, 15
    DEQUANT_MMX m0, m1, %1
    STORE_WORDS m0,  0,  1,  4,  5
    STORE_WORDS m1,  2,  3,  6,  7

    DEQUANT_MMX m2, m3, %1
    STORE_WORDS m2,  8,  9, 12, 13
    STORE_WORDS m3, 10, 11, 14, 15
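
; STORE_WORDS scatters the dequantized DCs back to the coefficient array:
; the numeric arguments are 4x4-block numbers, and each word is written 32
; bytes (one 16-coefficient block) apart, i.e. into the DC slot of block i.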
%macro IDCT_DC_DEQUANT 1
cglobal h264_luma_dc_dequant_idct, 3, 4, %1
    ; manually spill XMM registers for Win64 because
    ; the code here is initialized with INIT_MMX
    TRANSPOSE4x4W 0, 1, 2, 3, 4
; shift, tmp, output, qmul
%if WIN64
    DECLARE_REG_TMP 0,3,1,2
    ; we can't avoid this, because r0 is the shift register (ecx) on win64
%elif ARCH_X86_64
    DECLARE_REG_TMP 3,1,0,2
%else
    DECLARE_REG_TMP 1,3,0,2
%endif
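
; DECLARE_REG_TMP aliases t0..t3 (shift, tmp, output, qmul) onto different
; GPRs per ABI so the body stays identical everywhere; on Win64 the
; variable shift count must end up in ecx (r0), which forces that layout.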