1 ;*****************************************************************************
2 ;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
3 ;*****************************************************************************
4 ;* Copyright (C) 2005-2011 x264 project
6 ;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
8 ;* This file is part of Libav.
10 ;* Libav is free software; you can redistribute it and/or
11 ;* modify it under the terms of the GNU Lesser General Public
12 ;* License as published by the Free Software Foundation; either
13 ;* version 2.1 of the License, or (at your option) any later version.
15 ;* Libav is distributed in the hope that it will be useful,
16 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
17 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 ;* Lesser General Public License for more details.
20 ;* You should have received a copy of the GNU Lesser General Public
21 ;* License along with Libav; if not, write to the Free Software
22 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 ;******************************************************************************
25 %include "libavutil/x86/x86util.asm"
; Saturation bound for 10-bit pixels: eight 16-bit words of 0x03FF = (1<<10)-1.
; Used throughout this file as the upper clamp for CLIPW / store paths.
29 pw_pixel_max: times 8 dw ((1 << 10)-1)
34 ;-----------------------------------------------------------------------------
35 ; void h264_idct_add(pixel *dst, dctcoef *block, int stride)
36 ;-----------------------------------------------------------------------------
; NOTE(review): this listing is elided (the embedded original line numbers
; jump), so the %macro/%endmacro lines enclosing the fragments below are not
; visible; the fragments are NOT contiguous in the real file.
; Clamp %1 to [0, (1<<10)-1] using %4 as scratch (10-bit saturation).
44 CLIPW %1, %4, [pw_pixel_max]
; Classic separable 2D iDCT: 1D transform on rows, 4x4 dword transpose,
; 1D transform on columns. Data in m0-m3, m4/m5 are scratch.
64 IDCT4_1D d,0,1,2,3,4,5
65 TRANSPOSE4x4D 0,1,2,3,4
67 IDCT4_1D d,0,1,2,3,4,5
; Add the residual to dst (%1) two rows per call, stride in %3;
; dst is presumably advanced between the two calls in elided lines — confirm.
69 STORE_DIFFx2 m0, m1, m4, m5, %1, %3
71 STORE_DIFFx2 m2, m3, m4, m5, %1, %3
; Public entry point: 3 args, 3 GPRs (r0=dst, r1=block, r2=stride).
75 cglobal h264_idct_add_10, 3,3
76 IDCT4_ADD_10 r0, r1, r2
87 ;-----------------------------------------------------------------------------
88 ; h264_idct_add16(pixel *dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
89 ;-----------------------------------------------------------------------------
90 ;;;;;;; NO FATE SAMPLES TRIGGER THIS
; NOTE(review): the comment above means no FATE regression sample exercises
; this code path — changes here must be verified by hand.
; Internal subroutine (one per SUFFIX): 4x4 iDCT + add for one block.
; Per the STORE_DIFFx2 operands, r5 = dst pointer and r3 = stride here;
; coefficient loads are in elided lines.
92 add4x4_idct %+ SUFFIX:
98 IDCT4_1D d,0,1,2,3,4,5
99 TRANSPOSE4x4D 0,1,2,3,4
101 IDCT4_1D d,0,1,2,3,4,5
103 STORE_DIFFx2 m0, m1, m4, m5, r5, r3
105 STORE_DIFFx2 m2, m3, m4, m5, r5, r3
; AVX variant guarded at build time; the matching %endif is elided.
112 %if HAVE_AVX_EXTERNAL
122 call add4x4_idct %+ SUFFIX
; Instantiates h264_idct_add16_10 for the current instruction set (SUFFIX):
; iterate over the 16 luma 4x4 blocks, skipping those with no nonzero
; coefficients (loop body elided from this view). 5 args, 6 GPRs.
129 %macro IDCT_ADD16_10 0
130 cglobal h264_idct_add16_10, 5,6
; AVX instantiation guard; matching %endif/%endmacro are elided.
152 %if HAVE_AVX_EXTERNAL
157 ;-----------------------------------------------------------------------------
158 ; void h264_idct_dc_add(pixel *dst, dctcoef *block, int stride)
159 ;-----------------------------------------------------------------------------
; Add a broadcast DC value to 4 rows of pixels.
; Args: %1 = dst pointer, %2 = stride, %3 = presumably 3*stride (the row at
; %1+%2*2 is covered separately) — TODO confirm, the setup lines are elided.
; m0 is presumably preloaded with the DC level replicated across all words;
; clamping of m1-m4 against [0, pw_pixel_max] happens in elided lines.
160 %macro IDCT_DC_ADD_OP_10 3
163 paddw m1, m0, [%1+0 ]
164 paddw m2, m0, [%1+%2 ]
165 paddw m3, m0, [%1+%2*2]
166 paddw m4, m0, [%1+%3 ]
; Public entry point: 3 args, 3 GPRs (r0=dst, r1=block, r2=stride).
188 cglobal h264_idct_dc_add_10,3,3
; m6 = 10-bit clamp bound for the macro's (elided) saturation step.
194 mova m6, [pw_pixel_max]
; r1 appears reused as the third (presumably 3*stride) operand here — the
; block pointer must have been consumed in elided lines; confirm.
195 IDCT_DC_ADD_OP_10 r0, r2, r1
198 ;-----------------------------------------------------------------------------
199 ; void h264_idct8_dc_add(pixel *dst, dctcoef *block, int stride)
200 ;-----------------------------------------------------------------------------
; 8x8 DC-only path: applies the 4-row DC-add macro twice to cover 8 rows;
; dst (r0) is presumably advanced by 4 rows between the two calls in the
; elided lines — confirm. 3 args, 3 GPRs, 7 SIMD regs declared.
201 %macro IDCT8_DC_ADD 0
202 cglobal h264_idct8_dc_add_10,3,3,7
; m6 = 10-bit clamp bound (see IDCT_DC_ADD_OP_10 above).
209 mova m6, [pw_pixel_max]
210 IDCT_DC_ADD_OP_10 r0, r2, r1
212 IDCT_DC_ADD_OP_10 r0, r2, r1
; AVX instantiation guard; matching %endif is elided.
218 %if HAVE_AVX_EXTERNAL
223 ;-----------------------------------------------------------------------------
224 ; h264_idct_add16intra(pixel *dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
225 ;-----------------------------------------------------------------------------
; Fragment of an elided macro: load two consecutive entries of the
; block_offset table (r1) into r5d and run the full 4x4 iDCT-add for each.
228 mov r5d, [r1+(%1+0)*4]
229 call add4x4_idct %+ SUFFIX
230 mov r5d, [r1+(%1+1)*4]
232 call add4x4_idct %+ SUFFIX
237 %assign last_block 16
; ADD16_OP_INTRA %1=block index, %2 presumably indexes nnzc[] in scan order
; (the 4+1*8-style constants below) — the nnzc test itself is elided.
; DC-only blocks are routed to idct_dc_add instead of the full iDCT.
238 %macro ADD16_OP_INTRA 2
244 mov r5d, [r1+(%1+0)*4]
245 call idct_dc_add %+ SUFFIX
; Per-SUFFIX DC-add subroutine used by the intra path; per the macro call
; below, r5 = dst, r3 = stride, r6 = presumably 3*stride — confirm (setup
; lines elided).
253 %macro IDCT_ADD16INTRA_10 0
254 idct_dc_add %+ SUFFIX:
263 mova m6, [pw_pixel_max]
264 IDCT_DC_ADD_OP_10 r5, r3, r6
; Entry point: 5 args, 7 GPRs, 8 SIMD regs; processes the 16 luma 4x4
; blocks pairwise (block indices 0..14 step 2).
267 cglobal h264_idct_add16intra_10,5,7,8
268 ADD16_OP_INTRA 0, 4+1*8
269 ADD16_OP_INTRA 2, 4+2*8
270 ADD16_OP_INTRA 4, 6+1*8
271 ADD16_OP_INTRA 6, 6+2*8
272 ADD16_OP_INTRA 8, 4+3*8
273 ADD16_OP_INTRA 10, 4+4*8
274 ADD16_OP_INTRA 12, 6+3*8
275 ADD16_OP_INTRA 14, 6+4*8
; AVX instantiation guard; matching %endif is elided.
289 %if HAVE_AVX_EXTERNAL
; Chroma path: block indices run up to 36, so the end-of-loop sentinel
; changes accordingly (last_block is read by elided macro code).
294 %assign last_block 36
295 ;-----------------------------------------------------------------------------
296 ; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
297 ;-----------------------------------------------------------------------------
; NOTE: dst is pixel** here (one pointer per chroma plane), unlike the luma
; entry points above. 5 args, 8 GPRs, 7 SIMD regs. Only a subset of the
; ADD16_OP_INTRA invocations is visible; intermediate ones are elided.
299 cglobal h264_idct_add8_10,5,8,7
305 ADD16_OP_INTRA 16, 4+ 6*8
306 ADD16_OP_INTRA 18, 4+ 7*8
314 ADD16_OP_INTRA 32, 4+11*8
315 ADD16_OP_INTRA 34, 4+12*8
322 %endmacro ; IDCT_ADD8
; AVX instantiation guard; matching %endif is elided.
326 %if HAVE_AVX_EXTERNAL
331 ;-----------------------------------------------------------------------------
332 ; void h264_idct8_add(pixel *dst, dctcoef *block, int stride)
333 ;-----------------------------------------------------------------------------
; Restore natural register order after the 1D transform's implicit shuffle;
; the permutation being undone is documented in the original comment.
379 SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
; One full 8-point 1D iDCT over a coefficient array at %1; rows are 64*2
; bytes (= 64 dwords? element size not visible here — confirm) apart.
382 %macro IDCT8_1D_FULL 1
389 IDCT8_1D [%1], [%1+ 64*2]
392 ; %1=int16_t *block, %2=int16_t *dstblock
; SSE 8x8 path, first half: transpose the 8x8 dword matrix as four 4x4
; quadrants (m8 / m7 / m3 serve as scratch in the different variants).
393 %macro IDCT8_ADD_SSE_START 2
396 TRANSPOSE4x4D 0,1,2,3,8
398 TRANSPOSE4x4D 4,5,6,7,8
402 TRANSPOSE4x4D 0,1,2,3,7
408 TRANSPOSE4x4D 4,5,6,7,3
416 ; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; SSE 8x8 path, second half: add 8 residual rows to dst, two per call;
; dst advancement between calls is in elided lines.
417 %macro IDCT8_ADD_SSE_END 3
423 STORE_DIFFx2 m0, m1, m6, m7, %1, %3
425 STORE_DIFFx2 m2, m3, m6, m7, %1, %3
429 STORE_DIFFx2 m4, m5, m6, m7, %1, %3
431 STORE_DIFFx2 m0, m1, m6, m7, %1, %3
; Public entry point: 3 args, 4 GPRs, 16 SIMD regs (m8-m15 below imply the
; x86-64 build of this path).
435 cglobal h264_idct8_add_10, 3,4,16
; Stack pad keeps the scratch area 16-byte aligned past the return address.
437 %assign pad 16-gprsize-(stack_offset&15)
439 call h264_idct8_add1_10 %+ SUFFIX
445 ; TODO: does not need to use stack
; Internal subroutine: one complete 8x8 iDCT + add. Reserves a 256-byte
; aligned spill area on the stack for the transposed coefficients.
446 h264_idct8_add1_10 %+ SUFFIX:
447 %assign pad 256+16-gprsize
452 IDCT8_ADD_SSE_START r1, rsp
459 IDCT8_ADD_SSE_START r1+16, rsp+128
; Renumber registers so both 8x8 halves line up for the column transform.
460 PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,13, 10,14, 11,15, 13,5, 14,6, 15,7
461 IDCT8_1D [rsp], [rsp+128]
470 IDCT8_1D [rsp+16], [rsp+144]
476 CLIPW m8, m0, [pw_pixel_max]
; m8 = clamp bound; m0 presumably zeroed in elided lines (used as the lower
; bound by STORE_DIFF16) — confirm.
478 mova m8, [pw_pixel_max]
; Store 8 full-width rows, alternating r0 / r0+r2; r0 is presumably advanced
; by 2*stride between pairs in elided lines.
479 STORE_DIFF16 m9, m1, m0, m8, r0+r2
481 STORE_DIFF16 m10, m2, m0, m8, r0
482 STORE_DIFF16 m11, m3, m0, m8, r0+r2
484 STORE_DIFF16 m12, m4, m0, m8, r0
485 STORE_DIFF16 m13, m5, m0, m8, r0+r2
487 STORE_DIFF16 m14, m6, m0, m8, r0
488 STORE_DIFF16 m15, m7, m0, m8, r0+r2
; Alternate (presumably 8-register / x86-32) variant: spill to stack, then
; two END passes — r3 presumably points 4 rows past r0 (set up in elided
; lines); confirm.
490 IDCT8_ADD_SSE_START r1, rsp
491 IDCT8_ADD_SSE_START r1+16, rsp+128
493 IDCT8_ADD_SSE_END r0, rsp, r2
494 IDCT8_ADD_SSE_END r3, rsp+16, r2
; AVX instantiation guard; matching %endif is elided.
503 %if HAVE_AVX_EXTERNAL
508 ;-----------------------------------------------------------------------------
509 ; h264_idct8_add4(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
510 ;-----------------------------------------------------------------------------
511 ;;;;;;; NO FATE SAMPLES TRIGGER THIS
; NOTE(review): no FATE regression sample exercises this path — verify
; changes by hand.
; IDCT8_ADD4_OP %1=8x8 block index, %2 presumably indexes nnzc[] (the
; 4+1*8-style constants below); the skip test is elided. Each taken block
; runs the full 8x8 iDCT-add subroutine.
512 %macro IDCT8_ADD4_OP 2
517 call h264_idct8_add1_10 %+ SUFFIX
; Entry point: args loaded from stack (0 register args declared), 7 GPRs,
; 16 SIMD regs. Four 8x8 blocks cover the 16x16 macroblock.
525 cglobal h264_idct8_add4_10, 0,7,16
; Stack pad keeps the scratch area 16-byte aligned past the return address.
526 %assign pad 16-gprsize-(stack_offset&15)
533 IDCT8_ADD4_OP 0, 4+1*8
534 IDCT8_ADD4_OP 4, 6+1*8
535 IDCT8_ADD4_OP 8, 4+3*8
536 IDCT8_ADD4_OP 12, 6+3*8
539 %endmacro ; IDCT8_ADD4
; AVX instantiation guard; matching %endif is elided.
543 %if HAVE_AVX_EXTERNAL