;*****************************************************************************
;* MMX/SSE2-optimized H.264 iDCT
;*****************************************************************************
;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <hal@duncan.ol.sub.de>
;*          Min Chen <chenm001@163.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "libavutil/x86/x86util.asm"

; FIXME this table is a duplicate from h264data.h, and will be removed once the tables from h264 have been split
scan8_mem: db  4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
           db  6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
           db  4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
           db  6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
           db  4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
           db  6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
           db  4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
           db  6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
           db  4+11*8, 5+11*8, 4+12*8, 5+12*8
           db  6+11*8, 7+11*8, 6+12*8, 7+12*8
           db  4+13*8, 5+13*8, 4+14*8, 5+14*8
           db  6+13*8, 7+13*8, 6+14*8, 7+14*8

%define scan8 scan8_mem
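
; scan8 maps a 4x4-block index (the 16 luma blocks followed by the chroma
; blocks) to its position in the decoder's 6*8 non-zero-count cache: the
; cache is 8 entries wide, with the top row and the left 4 columns holding
; neighboring-macroblock context, so e.g. scan8[0] = 4+1*8 = 12 is row 1,
; column 4. The nnzc[] argument of the functions below is laid out this way.
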
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2  m0, m1, m4, m5, m7, 6, %1, %3
    STORE_DIFFx2  m2, m3, m4, m5, m7, 6, %1, %3
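
; For reference, each IDCT4_1D pass above computes the standard H.264 4x4
; inverse-transform butterfly on its four input rows a, b, c, d:
;
;     z0 = a + c;          z1 = a - c;
;     z2 = (b >> 1) - d;   z3 = b + (d >> 1);
;     o0 = z0 + z3;        o1 = z1 + z2;
;     o2 = z1 - z2;        o3 = z0 - z3;
;
; One pass over the rows, a 4x4 word transpose, and a second pass give the
; 2-D transform; STORE_DIFFx2 then shifts the result down by 6 (a rounding
; bias of 32 is folded in before the shift) and adds it to the destination
; pixels with saturation.
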
; ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_add_8, 3, 3, 0

    SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567

%macro IDCT8_1D_FULL 1
    IDCT8_1D [%1], [%1+ 64]

; %1=int16_t *block, %2=int16_t *dstblock
%macro IDCT8_ADD_MMX_START 2
    TRANSPOSE4x4W 0, 1, 2, 3, 7
    TRANSPOSE4x4W 4, 5, 6, 7, 3

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_MMX_END 3-4
    STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3

; ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 0
%assign pad 128+4-(stack_offset&7)
    IDCT8_ADD_MMX_START r1  , rsp
    IDCT8_ADD_MMX_START r1+8, rsp+64
    IDCT8_ADD_MMX_END   r0  , rsp, r2, r1
    IDCT8_ADD_MMX_END   r3  , rsp+8, r2
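
; The MMX version cannot keep an 8x8 block of 16-bit coefficients in
; registers, so it works on two 8x4 halves through a 128-byte scratch buffer
; on the stack (the pad computed above keeps that buffer 8-byte aligned):
; IDCT8_ADD_MMX_START runs the first 1-D pass and transposes into the
; buffer, IDCT8_ADD_MMX_END runs the second pass and stores 4 columns at a
; time.
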
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE 4
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
    IDCT8_1D [%2], [%2+ 16]
    STORE_DIFF m0, m6, m7, [%1     ]
    STORE_DIFF m1, m6, m7, [%1+%3  ]
    STORE_DIFF m2, m6, m7, [%1+%3*2]
    STORE_DIFF m3, m6, m7, [%1+%4  ]
    STORE_DIFF m4, m6, m7, [%1     ]
    STORE_DIFF m5, m6, m7, [%1+%3  ]
    STORE_DIFF m0, m6, m7, [%1+%3*2]
    STORE_DIFF m1, m6, m7, [%1+%4  ]
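
; With SSE2 the whole 8x8 block of 16-bit coefficients fits in xmm0-xmm7, so
; the transpose happens in registers. On x86-64 TRANSPOSE8x8W can use a
; ninth xmm register as scratch; the x86-32 variant spills through two
; 16-byte slots of the coefficient buffer instead, since only eight xmm
; registers are available there. The STORE_DIFF lines then add the rows to
; dst four at a time, with %4 holding 3*stride and dst advanced between the
; two groups.
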
; ff_h264_idct8_add_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10
    IDCT8_ADD_SSE r0, r1, r2, r3

%macro DC_ADD_MMXEXT_INIT 2

%macro DC_ADD_MMXEXT_OP 4
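
; DC_ADD_MMXEXT_INIT computes the rounded DC, dc = (block[0] + 32) >> 6
; (in this version %1 holds block[0] on entry and %2 the stride), and packs
; two byte vectors: dc clamped to [0,255] and -dc clamped likewise.
; DC_ADD_MMXEXT_OP adds the first with paddusb and subtracts the second with
; psubusb, which is equivalent to a clipped signed add per pixel, roughly
; (illustrative C, not the exact FFmpeg source):
;
;     int dc = (block[0] + 32) >> 6;
;     for (int i = 0; i < rows; i++)
;         for (int j = 0; j < width; j++)
;             dst[i*stride + j] = av_clip_uint8(dst[i*stride + j] + dc);
;
; Since one of the two clamped vectors is always zero, the saturating
; add/subtract pair never needs to widen the pixels to 16 bits.
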
; ff_h264_idct_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8, 3, 4, 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP   movh, r0, r2, r3

; ff_h264_idct8_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 3, 4, 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP   mova, r0, r2, r3
    DC_ADD_MMXEXT_OP   mova, r0, r2, r3

cglobal h264_idct_dc_add_8, 2, 3, 0
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP   movh, r0, r1, r2

; ff_h264_idct8_dc_add_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 2, 3, 0
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP   mova, r0, r1, r2
    DC_ADD_MMXEXT_OP   mova, r0, r1, r2

; ff_h264_idct_add16_mmx(uint8_t *dst, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov   r6d, dword [r1+r5*4]
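
; The add16 loop walks the 16 luma 4x4 blocks (r5 is the counter): the two
; movzx loads fetch nnzc[scan8[i]], and a zero count skips the iDCT for that
; block entirely. Roughly, in C (an illustrative sketch, not the exact
; FFmpeg source):
;
;     for (int i = 0; i < 16; i++)
;         if (nnzc[scan8[i]])
;             h264_idct_add(dst + block_offset[i], block + i*16, stride);
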
; ff_h264_idct8_add4_mmx(uint8_t *dst, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
%assign pad 128+4-(stack_offset&7)
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov   r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp, r3, r2
    mov   r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3

; ff_h264_idct_add16_mmxext(uint8_t *dst, const int *block_offset,
;                           int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r6, r3
    mov   dst2d, dword [r1+r5*4]
    lea   dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
    mov   r6d, dword [r1+r5*4]

; ff_h264_idct_add16intra_mmx(uint8_t *dst, const int *block_offset,
;                             int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov   r6d, dword [r1+r5*4]

; ff_h264_idct_add16intra_mmxext(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov   r6d, dword [r1+r5*4]
    DC_ADD_MMXEXT_INIT r6, r3
    mov   dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6

; ff_h264_idct8_add4_mmxext(uint8_t *dst, const int *block_offset,
;                           int16_t *block, int stride,
;                           const uint8_t nnzc[6*8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
%assign pad 128+4-(stack_offset&7)
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r6, r3
    mov   dst2d, dword [r1+r5*4]
    lea   dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea   dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    mov   r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp, r3, r2
    mov   r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3

; ff_h264_idct8_add4_sse2(uint8_t *dst, const int *block_offset,
;                         int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea   picregq, [scan8_mem]
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r6, r3
    mov   dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea   dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    mov   dst2d, dword [r1+r5*4]
    IDCT8_ADD_SSE dst2q, r2, r3, r6

h264_idct_add8_mmx_plane:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov   r0d, dword [r1+r5*4]
    mov   r0, r1m ; XXX r1m here is actually r0m of the calling func
    add   r0, dword [r1+r5*4]

; ff_h264_idct_add8_mmx(uint8_t **dest, const int *block_offset,
;                       int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea  picregq, [scan8_mem]
    call h264_idct_add8_mmx_plane
    call h264_idct_add8_mmx_plane
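
; The add8 entry points take uint8_t **dest: an array holding the U and V
; plane pointers, and the plane routine above is called once per plane.
; Because that routine is entered with a plain call, its stack offsets are
; shifted by the pushed return address; that is why r1m inside it actually
; addresses the caller's first argument (the dest array), from which the
; plane base is fetched before block_offset[i] is added.
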
h264_idct_add8_mmxext_plane:
    movzx r6, byte [scan8+r5]
    movzx r6, byte [r4+r6]
    mov   r0d, dword [r1+r5*4]
    mov   r0, r1m ; XXX r1m here is actually r0m of the calling func
    add   r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_INIT r6, r3
    mov   r0d, dword [r1+r5*4]
    mov   r0, r1m ; XXX r1m here is actually r0m of the calling func
    add   r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, r0, r3, r6

; ff_h264_idct_add8_mmxext(uint8_t **dest, const int *block_offset,
;                          int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea  picregq, [scan8_mem]
    call h264_idct_add8_mmxext_plane
    call h264_idct_add8_mmxext_plane

; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6 = clobbered
h264_idct_dc_add8_mmxext:
    movd      m0, [r2   ]     ;  0 0 X D
    punpcklwd m0, [r2+32]     ;  x X d D
    punpcklwd m0, m0          ;  d d D D
    pxor      m1, m1          ;  0 0 0 0
    psubw     m1, m0          ; -d-d-D-D
    packuswb  m0, m1          ; -d-d-D-D d d D D
    pshufw    m1, m0, 0xFA    ; -d-d-d-d-D-D-D-D
    punpcklwd m0, m0          ;  d d d d D D D D
    DC_ADD_MMXEXT_OP movq, r0, r3, r6
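
; This routine covers the DC-only case for two horizontally adjacent chroma
; 4x4 blocks at once: the two DC levels D (block[0]) and d (block[32]) are
; rounded, spread across byte lanes side by side together with their negated
; copies, so a single DC_ADD_MMXEXT_OP pass of saturating adds and subtracts
; applies both DCs across the 8-pixel-wide row.
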
; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
h264_add8x4_idct_sse2:
    IDCT4_1D        w, 0, 1, 2, 3, 4, 5
    TRANSPOSE2x4x4W 0, 1, 2, 3, 4
    IDCT4_1D        w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2    m0, m1, m4, m5, m7, 6, r0, r3
    STORE_DIFFx2    m2, m3, m4, m5, m7, 6, r0, r3
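
; Here each xmm register holds a row from two side-by-side 4x4 blocks, so a
; single IDCT4_1D pass transforms both blocks at once. TRANSPOSE2x4x4W
; transposes the two 4x4 quadrants independently instead of doing a full
; 8x8 transpose, keeping each block's data within its own half of the
; register for the second pass.
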
%macro add16_sse2_cycle 2
    movzx r0, word [r4+%2]
    mov   r0d, dword [r1+%1*8]
    call  h264_add8x4_idct_sse2

; ff_h264_idct_add16_sse2(uint8_t *dst, const int *block_offset,
;                         int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
    ; unrolling of the loop leads to an average performance gain of
    add16_sse2_cycle 0, 0xc
    add16_sse2_cycle 1, 0x14
    add16_sse2_cycle 2, 0xe
    add16_sse2_cycle 3, 0x16
    add16_sse2_cycle 4, 0x1c
    add16_sse2_cycle 5, 0x24
    add16_sse2_cycle 6, 0x1e
    add16_sse2_cycle 7, 0x26
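
; The second macro argument is a byte offset into nnzc for an even-numbered
; block: 0xc is scan8[0] (= 4+1*8), 0x14 is scan8[2], 0xe is scan8[4], etc.
; Loading a word rather than a byte fetches the non-zero counts of two
; horizontally adjacent blocks in one go, matching the two-block 8x4 unit
; that h264_add8x4_idct_sse2 handles per call ([r1+%1*8] correspondingly
; reads block_offset[2*%1]).
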
%macro add16intra_sse2_cycle 2
    movzx r0, word [r4+%2]
    mov   r0d, dword [r1+%1*8]
    call  h264_add8x4_idct_sse2
    mov   r0d, dword [r1+%1*8]
    call  h264_idct_dc_add8_mmxext

; ff_h264_idct_add16intra_sse2(uint8_t *dst, const int *block_offset,
;                              int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
    add16intra_sse2_cycle 0, 0xc
    add16intra_sse2_cycle 1, 0x14
    add16intra_sse2_cycle 2, 0xe
    add16intra_sse2_cycle 3, 0x16
    add16intra_sse2_cycle 4, 0x1c
    add16intra_sse2_cycle 5, 0x24
    add16intra_sse2_cycle 6, 0x1e
    add16intra_sse2_cycle 7, 0x26

%macro add8_sse2_cycle 2
    movzx r0, word [r4+%2]
    mov   r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add   r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call  h264_add8x4_idct_sse2
    mov   r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add   r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call  h264_idct_dc_add8_mmxext
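
; Chroma block_offset entries sit at indices 16-19 (U) and 32-35 (V) of the
; full table, so the byte offset (%1&1)*8 + 64*(1+(%1>>1)) selects entry
; 2*(%1&1) + 16*(1+(%1>>1)): cycles 0-1 cover the four U blocks and cycles
; 2-3 the four V blocks, again two 4x4 blocks per call.
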
; ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset,
;                        int16_t *block, int stride, const uint8_t nnzc[6*8])
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
    add8_sse2_cycle 0, 0x34
    add8_sse2_cycle 1, 0x3c
    add8_sse2_cycle 2, 0x5c
    add8_sse2_cycle 3, 0x64

; void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)
    SUMSUB_BADC w, %4, %3, %2, %1, %5
    SUMSUB_BADC w, %4, %2, %3, %1, %5
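
; The luma DC transform is a 4-point Hadamard rather than the regular iDCT
; rotation: each 1-D pass produces the four outputs a+b+c+d, a+b-c-d,
; a-b-c+d and a-b+c-d using only additions and subtractions. The first
; SUMSUB_BADC forms the two sum/difference pairs, the second combines them.
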
%macro STORE_WORDS 5-9

%macro DEQUANT_STORE 1
    STORE_WORDS xmm0,  0,  1,  4,  5,  2,  3,  6,  7
    STORE_WORDS xmm2,  8,  9, 12, 13, 10, 11, 14, 15
    DEQUANT_MMX m0, m1, %1
    STORE_WORDS m0,  0,  1,  4,  5
    STORE_WORDS m1,  2,  3,  6,  7
    DEQUANT_MMX m2, m3, %1
    STORE_WORDS m2,  8,  9, 12, 13
    STORE_WORDS m3, 10, 11, 14, 15
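
; DEQUANT_STORE applies output = (dc*qmul + 128) >> 8 to each transformed DC
; and STORE_WORDS scatters the results to the DC slot of each 4x4 block in
; the output buffer (blocks are 16 int16_t apart). The index lists look
; interleaved (0,1,4,5 / 2,3,6,7 ...) because H.264 numbers the 4x4 luma
; blocks in 2x2 quads, so a raster row of DCs lands in blocks 0, 1, 4, 5.
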
%macro IDCT_DC_DEQUANT 1
cglobal h264_luma_dc_dequant_idct, 3, 4, %1
    ; manually spill XMM registers for Win64 because
    ; the code here is initialized with INIT_MMX
    TRANSPOSE4x4W 0, 1, 2, 3, 4

; shift, tmp, output, qmul
DECLARE_REG_TMP 0, 3, 1, 2
; we can't avoid this, because r0 is the shift register (ecx) on win64
DECLARE_REG_TMP 3, 1, 0, 2
DECLARE_REG_TMP 1, 3, 0, 2