;*****************************************************************************
;* MMX/SSE2-optimized H.264 iDCT
;*****************************************************************************
;* Copyright (C) 2004-2005 Michael Niedermayer, Loren Merritt
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr>
;*          Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <hal@duncan.ol.sub.de>
;*          Min Chen <chenm001@163.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************

%include "libavutil/x86/x86util.asm"
scan8_mem: db  4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
           db  6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
           db  4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
           db  6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
           db  4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
           db  6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
           db  4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
           db  6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
           db  4+11*8, 5+11*8, 4+12*8, 5+12*8
           db  6+11*8, 7+11*8, 6+12*8, 7+12*8
           db  4+13*8, 5+13*8, 4+14*8, 5+14*8
           db  6+13*8, 7+13*8, 6+14*8, 7+14*8
%ifdef PIC
%define npicregs 1
%define scan8 picregq ; PIC builds address the table through a register
%else
%define npicregs 0
%define scan8 scan8_mem
%endif
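
; scan8 maps a block index (0-15 luma, 16+ chroma) to the block's slot in
; the decoder's 8-entries-per-row non-zero-count cache, so one byte load
; answers "does block i have coefficients?". Rough usage sketch in C
; (hypothetical names, mirroring what the add16/add8 loops below do):
;
;   for (i = 0; i < 16; i++)
;       if (nnzc[scan8[i]])
;           idct_add(dst + block_offset[i], block + i * 16, stride);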

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT4_ADD 3
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5
    TRANSPOSE4x4W 0, 1, 2, 3, 4
    IDCT4_1D      w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, %1, %3
%endmacro
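
; IDCT4_ADD is the classic row/column decomposition: one 1-D 4-point pass
; over the rows, a transpose, the same pass over the columns, then the
; >>6 normalization (the "6" passed to STORE_DIFFx2) during the clipped
; add to dst. The 4-point stage itself, as a C sketch (z* are local
; temporaries, not registers):
;
;   z0 = a + c;          z1 = a - c;
;   z2 = (b >> 1) - d;   z3 = b + (d >> 1);
;   out = { z0 + z3, z1 + z2, z1 - z2, z0 - z3 };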

; void ff_h264_idct_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_add_8, 3, 3, 0
    SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
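; The 1-D 8-point pass leaves its eight outputs permuted across the
; registers (order 7,0,3,1,5,2,4,6, per the comment); the SWAP is a
; compile-time register rename back to ascending order, costing no
; instructions.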

%macro IDCT8_1D_FULL 1
    IDCT8_1D [%1], [%1+ 64]

; %1=int16_t *block, %2=int16_t *dstblock
%macro IDCT8_ADD_MMX_START 2
    TRANSPOSE4x4W 0, 1, 2, 3, 7
    TRANSPOSE4x4W 4, 5, 6, 7, 3

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_MMX_END 3-4
    STORE_DIFFx2 m0, m1, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m2, m3, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m4, m0, m5, m6, m7, 6, %1, %3
    STORE_DIFFx2 m1, m2, m5, m6, m7, 6, %1, %3
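
; An MMX register holds only four 16-bit coefficients, so the 8x8
; transform is split in two: IDCT8_ADD_MMX_START runs the row pass on a
; 4x8 half and transposes it out to a stack buffer, and IDCT8_ADD_MMX_END
; runs the column pass on a half and adds the clipped result to dst.
; Hence the two START/END calls per 8x8 block in the functions below.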

; void ff_h264_idct8_add_8_mmx(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 0
    %assign pad 128+4-(stack_offset&7)
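    ; ^ scratch for the two transposed 4x8 halves (128 bytes) plus the
    ; slack needed to bring the stack back to 8-byte alignment;
    ; stack_offset is x86inc's record of how far the prologue has
    ; already moved rsp off alignment.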
    IDCT8_ADD_MMX_START r1  , rsp
    IDCT8_ADD_MMX_START r1+8, rsp+64
    IDCT8_ADD_MMX_END   r0  , rsp,   r2, r1
    IDCT8_ADD_MMX_END   r3  , rsp+8, r2

; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE 4
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
    TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [%2], [%2+16]
    IDCT8_1D [%2], [%2+ 16]
    STORE_DIFF m0, m6, m7, [%1     ]
    STORE_DIFF m1, m6, m7, [%1+%3  ]
    STORE_DIFF m2, m6, m7, [%1+%3*2]
    STORE_DIFF m3, m6, m7, [%1+%4  ]
    STORE_DIFF m4, m6, m7, [%1     ]
    STORE_DIFF m5, m6, m7, [%1+%3  ]
    STORE_DIFF m0, m6, m7, [%1+%3*2]
    STORE_DIFF m1, m6, m7, [%1+%4  ]
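
; With SSE2 a full row of eight coefficients fits in one register, so the
; 8x8 transform can stay in-register on x86-64 (the TRANSPOSE8x8W variant
; with a ninth temporary above). x86-32 has only eight xmm registers,
; hence the second TRANSPOSE8x8W form that spills through [%2]/[%2+16].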

; void ff_h264_idct8_add_8_sse2(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_add_8, 3, 4, 10
    IDCT8_ADD_SSE r0, r1, r2, r3

%macro DC_ADD_MMXEXT_INIT 2

%macro DC_ADD_MMXEXT_OP 4

; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
%if ARCH_X86_64
cglobal h264_idct_dc_add_8, 3, 4, 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP movh, r0, r2, r3

; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 3, 4, 0
    DC_ADD_MMXEXT_INIT r3, r2
    DC_ADD_MMXEXT_OP mova, r0, r2, r3
    DC_ADD_MMXEXT_OP mova, r0, r2, r3
%else ; ARCH_X86_32

; void ff_h264_idct_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct_dc_add_8, 2, 3, 0
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP movh, r0, r1, r2

; void ff_h264_idct8_dc_add_8_mmxext(uint8_t *dst, int16_t *block, int stride)
cglobal h264_idct8_dc_add_8, 2, 3, 0
    DC_ADD_MMXEXT_INIT r2, r1
    DC_ADD_MMXEXT_OP mova, r0, r1, r2
    DC_ADD_MMXEXT_OP mova, r0, r1, r2
%endif
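
; Roughly what the DC_ADD pair computes, as a C sketch:
;
;   int dc = (block[0] + 32) >> 6;
;   for (each pixel p of the 4x4 or 8x8 block)
;       dst[p] = av_clip_uint8(dst[p] + dc);
;
; INIT broadcasts max(dc,0) and max(-dc,0) as packed bytes; each OP then
; does paddusb/psubusb with those, getting a signed, clipped add out of
; unsigned saturating byte operations.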

; void ff_h264_idct_add16_8_mmx(uint8_t *dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
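; Outline: loop over the 16 luma 4x4 blocks, test each block's nnzc cache
; byte through scan8, and for non-empty blocks run the 4x4 iDCT-add at
; dst + block_offset[i] (the movzx/mov lines below are that lookup).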
cglobal h264_idct_add16_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    mov         r6d, dword [r1+r5*4]

; void ff_h264_idct8_add4_8_mmx(uint8_t *dst, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    %assign pad 128+4-(stack_offset&7)
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    mov         r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp,   r3, r2
    mov         r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3

; void ff_h264_idct_add16_8_mmxext(uint8_t *dst, const int *block_offset,
;                                  int16_t *block, int stride,
;                                  const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r6, r3
    mov       dst2d, dword [r1+r5*4]
    lea       dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6
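    ; DC-only shortcut: a block whose nnzc entry is 1 and whose single
    ; coefficient block[0] is non-zero skips the full transform and takes
    ; the broadcast-and-saturating-add path above instead.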
    mov         r6d, dword [r1+r5*4]

; void ff_h264_idct_add16intra_8_mmx(uint8_t *dst, const int *block_offset,
;                                    int16_t *block, int stride,
;                                    const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + npicregs, 0, dst, block_offset, block, stride, nnzc, cntr, coeff, picreg
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    mov         r6d, dword [r1+r5*4]

; void ff_h264_idct_add16intra_8_mmxext(uint8_t *dst, const int *block_offset,
;                                       int16_t *block, int stride,
;                                       const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    mov         r6d, dword [r1+r5*4]
    DC_ADD_MMXEXT_INIT r6, r3
    mov       dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, dst2q, r3, r6

; void ff_h264_idct8_add4_8_mmxext(uint8_t *dst, const int *block_offset,
;                                  int16_t *block, int stride,
;                                  const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    %assign pad 128+4-(stack_offset&7)
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r6, r3
    mov       dst2d, dword [r1+r5*4]
    lea       dst2q, [r0+dst2q]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea       dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    mov         r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_START r2  , rsp
    IDCT8_ADD_MMX_START r2+8, rsp+64
    IDCT8_ADD_MMX_END   r6  , rsp,   r3, r2
    mov         r6d, dword [r1+r5*4]
    IDCT8_ADD_MMX_END   r6  , rsp+8, r3

; void ff_h264_idct8_add4_8_sse2(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6 * 8])
cglobal h264_idct8_add4_8, 5, 8 + npicregs, 10, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea     picregq, [scan8_mem]
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    DC_ADD_MMXEXT_INIT r6, r3
    mov       dst2d, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    lea       dst2q, [dst2q+r3*4]
    DC_ADD_MMXEXT_OP mova, dst2q, r3, r6
    mov       dst2d, dword [r1+r5*4]
    IDCT8_ADD_SSE dst2q, r2, r3, r6

h264_idct_add8_mmx_plane:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    mov         r0d, dword [r1+r5*4]
    mov          r0, r1m ; XXX r1m here is actually r0m of the calling func
    add          r0, dword [r1+r5*4]
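
; For the add8 entry points the first argument is uint8_t **dest (one
; pointer per chroma plane), so the plane helper re-reads the pointer
; array before each block. The r1m quirk above follows from reaching this
; label via a plain call: the pushed return address shifts the caller's
; stack arguments by one slot relative to this frame.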

; void ff_h264_idct_add8_8_mmx(uint8_t **dest, const int *block_offset,
;                              int16_t *block, int stride,
;                              const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea     picregq, [scan8_mem]
    call h264_idct_add8_mmx_plane
    call h264_idct_add8_mmx_plane

h264_idct_add8_mmxext_plane:
    movzx        r6, byte [scan8+r5]
    movzx        r6, byte [r4+r6]
    mov         r0d, dword [r1+r5*4]
    mov          r0, r1m ; XXX r1m here is actually r0m of the calling func
    add          r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_INIT r6, r3
    mov         r0d, dword [r1+r5*4]
    mov          r0, r1m ; XXX r1m here is actually r0m of the calling func
    add          r0, dword [r1+r5*4]
    DC_ADD_MMXEXT_OP movh, r0, r3, r6

; void ff_h264_idct_add8_8_mmxext(uint8_t **dest, const int *block_offset,
;                                 int16_t *block, int stride,
;                                 const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 8 + npicregs, 0, dst1, block_offset, block, stride, nnzc, cntr, coeff, dst2, picreg
    lea     picregq, [scan8_mem]
    call h264_idct_add8_mmxext_plane
    call h264_idct_add8_mmxext_plane

; r0 = uint8_t *dst, r2 = int16_t *block, r3 = int stride, r6 = clobbered
h264_idct_dc_add8_mmxext:
    movd         m0, [r2   ]   ;  0 0 X D
    punpcklwd    m0, [r2+32]   ;  x X d D
    punpcklwd    m0, m0        ;  d d D D
    pxor         m1, m1        ;  0 0 0 0
    psubw        m1, m0        ; -d-d-D-D
    packuswb     m0, m1        ; -d-d-D-D d d D D
    pshufw       m1, m0, 0xFA  ; -d-d-d-d-D-D-D-D
    punpcklwd    m0, m0        ;  d d d d D D D D
    DC_ADD_MMXEXT_OP movq, r0, r3, r6
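
; Adds the DCs of two horizontally adjacent 4x4 chroma blocks in one
; pass: D is the DC of the block at r2, d that of the block at r2+32.
; The shuffles build a byte vector with D in the low four bytes and d in
; the high four (plus its negation), so one saturating add/sub pair per
; 8-pixel row applies each block's DC to its own half.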

; r0 = uint8_t *dst (clobbered), r2 = int16_t *block, r3 = int stride
h264_add8x4_idct_sse2:
    IDCT4_1D        w, 0, 1, 2, 3, 4, 5
    TRANSPOSE2x4x4W 0, 1, 2, 3, 4
    IDCT4_1D        w, 0, 1, 2, 3, 4, 5
    STORE_DIFFx2 m0, m1, m4, m5, m7, 6, r0, r3
    STORE_DIFFx2 m2, m3, m4, m5, m7, 6, r0, r3
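
; Processes two horizontally adjacent 4x4 blocks per call: each xmm row
; holds the left block's four coefficients next to the right block's, and
; TRANSPOSE2x4x4W transposes the two 4x4 sub-matrices independently, so
; both blocks share the same two IDCT4_1D passes.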

%macro add16_sse2_cycle 2
    movzx        r0, word [r4+%2]
    mov         r0d, dword [r1+%1*8]
    call h264_add8x4_idct_sse2

; void ff_h264_idct_add16_8_sse2(uint8_t *dst, const int *block_offset,
;                                int16_t *block, int stride,
;                                const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16_8, 5, 5 + ARCH_X86_64, 8
    ; unrolling of the loop leads to an average performance gain of
    add16_sse2_cycle 0, 0xc
    add16_sse2_cycle 1, 0x14
    add16_sse2_cycle 2, 0xe
    add16_sse2_cycle 3, 0x16
    add16_sse2_cycle 4, 0x1c
    add16_sse2_cycle 5, 0x24
    add16_sse2_cycle 6, 0x1e
    add16_sse2_cycle 7, 0x26
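
; Each cycle handles a pair of adjacent 4x4 blocks: the second argument
; is the scan8 slot of the even-numbered block (0xc = 4 + 1*8 is block
; 0, 0x14 = 4 + 2*8 is block 2, and so on), and the word-sized nnzc load
; in the macro fetches both blocks' cache bytes at once.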

%macro add16intra_sse2_cycle 2
    movzx        r0, word [r4+%2]
    mov         r0d, dword [r1+%1*8]
    call h264_add8x4_idct_sse2
    mov         r0d, dword [r1+%1*8]
    call h264_idct_dc_add8_mmxext

; void ff_h264_idct_add16intra_8_sse2(uint8_t *dst, const int *block_offset,
;                                     int16_t *block, int stride,
;                                     const uint8_t nnzc[6 * 8])
cglobal h264_idct_add16intra_8, 5, 7 + ARCH_X86_64, 8
    add16intra_sse2_cycle 0, 0xc
    add16intra_sse2_cycle 1, 0x14
    add16intra_sse2_cycle 2, 0xe
    add16intra_sse2_cycle 3, 0x16
    add16intra_sse2_cycle 4, 0x1c
    add16intra_sse2_cycle 5, 0x24
    add16intra_sse2_cycle 6, 0x1e
    add16intra_sse2_cycle 7, 0x26

%macro add8_sse2_cycle 2
    movzx        r0, word [r4+%2]
    mov         r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add          r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call h264_add8x4_idct_sse2
    mov         r0d, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    add          r0, dword [r1+(%1&1)*8+64*(1+(%1>>1))]
    call h264_idct_dc_add8_mmxext

; void ff_h264_idct_add8_8_sse2(uint8_t **dest, const int *block_offset,
;                               int16_t *block, int stride,
;                               const uint8_t nnzc[6 * 8])
cglobal h264_idct_add8_8, 5, 7 + ARCH_X86_64, 8
    add8_sse2_cycle 0, 0x34
    add8_sse2_cycle 1, 0x3c
    add8_sse2_cycle 2, 0x5c
    add8_sse2_cycle 3, 0x64
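
; Chroma pairs: 0x34 = 4 + 6*8 is scan8's first chroma slot (block 16).
; Inside the macro, the 64*(1+(%1>>1)) byte offset selects one chroma
; group of block_offset entries and (%1&1)*8 picks the pair within it.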

; void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul)
%macro WALSH4_1D 5
    SUMSUB_BADC w, %4, %3, %2, %1, %5
    SUMSUB_BADC w, %4, %2, %3, %1, %5
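
; Two rounds of paired sums and differences form the 4-point
; (Walsh-)Hadamard butterfly; the luma DC block uses this in place of the
; full iDCT because its transform matrix is all +/-1, so no shifts or
; rounding are needed inside the passes.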

%macro STORE_WORDS 5-9

%macro DEQUANT_STORE 1
    pshufd    xmm4, xmm4, 0
    punpcklwd xmm0, xmm5
    punpcklwd xmm1, xmm5
    punpcklwd xmm2, xmm5
    punpcklwd xmm3, xmm5
    STORE_WORDS xmm0,  0,  1,  4,  5,  2,  3,  6,  7
    STORE_WORDS xmm2,  8,  9, 12, 13, 10, 11, 14, 15
    DEQUANT_MMX m0, m1, %1
    STORE_WORDS m0,  0,  1,  4,  5
    STORE_WORDS m1,  2,  3,  6,  7
    DEQUANT_MMX m2, m3, %1
    STORE_WORDS m2,  8,  9, 12, 13
    STORE_WORDS m3, 10, 11, 14, 15
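
; After both Hadamard passes each transformed DC is scaled by qmul (with
; rounding) and scattered back to the DC position of its 4x4 block, one
; value every 16 int16_t coefficients; the word indices 0-15 in the
; STORE_WORDS lists above are those block numbers.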

%macro IDCT_DC_DEQUANT 1
cglobal h264_luma_dc_dequant_idct, 3, 4, %1
    ; manually spill XMM registers for Win64 because
    ; the code here is initialized with INIT_MMX
    TRANSPOSE4x4W 0, 1, 2, 3, 4

; shift, tmp, output, qmul
%if WIN64
    DECLARE_REG_TMP 0,3,1,2
    ; we can't avoid this, because r0 is the shift register (ecx) on win64
%elif ARCH_X86_64
    DECLARE_REG_TMP 3,1,0,2
%else
    DECLARE_REG_TMP 1,3,0,2
%endif