1 ;*****************************************************************************
2 ;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
3 ;*****************************************************************************
4 ;* Copyright (C) 2005-2011 x264 project
6 ;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
8 ;* This file is part of FFmpeg.
10 ;* FFmpeg is free software; you can redistribute it and/or
11 ;* modify it under the terms of the GNU Lesser General Public
12 ;* License as published by the Free Software Foundation; either
13 ;* version 2.1 of the License, or (at your option) any later version.
15 ;* FFmpeg is distributed in the hope that it will be useful,
16 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
17 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 ;* Lesser General Public License for more details.
20 ;* You should have received a copy of the GNU Lesser General Public
21 ;* License along with FFmpeg; if not, write to the Free Software
22 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 ;******************************************************************************
25 %include "libavutil/x86/x86util.asm"
30 %define pw_pixel_max pw_1023
33 ;-----------------------------------------------------------------------------
34 ; void ff_h264_idct_add_10(pixel *dst, int16_t *block, int stride)
35 ;-----------------------------------------------------------------------------
; NOTE(review): fragments of macro bodies whose %macro headers/%endmacro
; lie outside this view; %-parameter roles below are inferred — confirm
; against the full file (%1 appears to be a dst pointer, %3 a stride).
CLIPW %1, %4, [pw_pixel_max] ; clamp word lanes to [%4, 1023]; pw_pixel_max is pw_1023 (10-bit max, see %define above)
IDCT4_1D d,0,1,2,3,4,5 ; 1-D 4-point iDCT on dword lanes of m0-m3 (m4/m5 scratch)
TRANSPOSE4x4D 0,1,2,3,4 ; transpose 4x4 dwords: rows -> columns (m4 scratch)
IDCT4_1D d,0,1,2,3,4,5 ; second 1-D pass completes the 2-D transform
STORE_DIFFx2 m0, m1, m4, m5, %1, %3 ; add residual rows 0-1 to dst (%1), stride %3, clip and store
STORE_DIFFx2 m2, m3, m4, m5, %1, %3 ; rows 2-3
; Entry point: 3 int/ptr args, 3 GPRs used; per the prototype comment
; above, x86inc maps r0=dst, r1=block, r2=stride.
cglobal h264_idct_add_10, 3,3
IDCT4_ADD_10 r0, r1, r2 ; expand the 4x4 iDCT+add for (dst, block, stride)
91 ;-----------------------------------------------------------------------------
92 ; void ff_h264_idct_add16_10(pixel *dst, const int *block_offset,
93 ; int16_t *block, int stride,
94 ; const uint8_t nnzc[6*8])
95 ;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
; Per-ISA 4x4 iDCT+add helper reached via "call add4x4_idct %+ SUFFIX";
; %+ pastes the current SUFFIX (per-ISA variant name — confirm) onto the
; label. NOTE(review): r5/r3 roles (dst pointer / stride, presumably) are
; established by elided caller code — confirm against the full file.
add4x4_idct %+ SUFFIX:
IDCT4_1D d,0,1,2,3,4,5 ; row pass of the 4x4 iDCT
TRANSPOSE4x4D 0,1,2,3,4 ; rows -> columns
IDCT4_1D d,0,1,2,3,4,5 ; column pass
STORE_DIFFx2 m0, m1, m4, m5, r5, r3 ; add+clip rows 0-1 into dst (r5), stride r3
STORE_DIFFx2 m2, m3, m4, m5, r5, r3 ; rows 2-3
%if HAVE_AVX_EXTERNAL ; AVX build guard (code between guard and call is elided)
call add4x4_idct %+ SUFFIX
; Emits h264_idct_add16_10 for the currently-initialized ISA; the macro
; body continues past this view.
%macro IDCT_ADD16_10 0
; 5 args per the prototype above (dst, block_offset, block, stride, nnzc); 6 GPRs
cglobal h264_idct_add16_10, 5,6
%if HAVE_AVX_EXTERNAL ; guard for the AVX instantiation (elided)
168 ;-----------------------------------------------------------------------------
169 ; void ff_h264_idct_dc_add_10(pixel *dst, int16_t *block, int stride)
170 ;-----------------------------------------------------------------------------
; Add a DC value to four dst rows (clip/store happens in elided code).
; %1 = dst pointer, %2 = stride, %3 = presumably 3*stride — TODO confirm;
; assumes m0 holds the DC splat across all word lanes (setup elided).
%macro IDCT_DC_ADD_OP_10 3
paddw m1, m0, [%1+0 ] ; row 0 + DC
paddw m2, m0, [%1+%2 ] ; row 1 + DC
paddw m3, m0, [%1+%2*2] ; row 2 + DC
paddw m4, m0, [%1+%3 ] ; row 3 + DC
; void ff_h264_idct_dc_add_10(pixel *dst, int16_t *block, int stride) — DC-only 4x4 case
cglobal h264_idct_dc_add_10,3,3
mova m6, [pw_pixel_max] ; clamp ceiling (1023) for the elided clip/store path
IDCT_DC_ADD_OP_10 r0, r2, r1 ; NOTE(review): r1 passed as %3 — presumably rewritten to 3*stride by elided setup; confirm
211 ;-----------------------------------------------------------------------------
212 ; void ff_h264_idct8_dc_add_10(pixel *dst, int16_t *block, int stride)
213 ;-----------------------------------------------------------------------------
214 %macro IDCT8_DC_ADD 0
215 cglobal h264_idct8_dc_add_10,3,4,7
223 mova m6, [pw_pixel_max]
224 IDCT_DC_ADD_OP_10 r0, r2, r1
226 IDCT_DC_ADD_OP_10 r0, r2, r1
232 %if HAVE_AVX_EXTERNAL
237 ;-----------------------------------------------------------------------------
238 ; void ff_h264_idct_add16intra_10(pixel *dst, const int *block_offset,
239 ; int16_t *block, int stride,
240 ; const uint8_t nnzc[6*8])
241 ;-----------------------------------------------------------------------------
; NOTE(review): fragments of the per-block dispatch macros; the nnzc[]
; tests and branch labels between these lines are elided from this view.
mov r5d, [r1+(%1+0)*4] ; r5 = block_offset[%1] (r1 = block_offset, dword entries)
call add4x4_idct %+ SUFFIX ; full 4x4 iDCT+add for this block
mov r5d, [r1+(%1+1)*4] ; r5 = block_offset[%1+1]
call add4x4_idct %+ SUFFIX
%assign last_block 16 ; 16 4x4 luma blocks handled by the add16intra loop
%macro ADD16_OP_INTRA 2 ; %1 = block index, %2 = nnzc[] offset (used by elided test)
mov r5d, [r1+(%1+0)*4] ; r5 = block_offset[%1]
call idct_dc_add %+ SUFFIX ; DC-only fast path
%macro IDCT_ADD16INTRA_10 0
; Local DC-only helper for ADD16_OP_INTRA (DC splat setup is elided).
idct_dc_add %+ SUFFIX:
mova m6, [pw_pixel_max] ; clamp ceiling (1023)
IDCT_DC_ADD_OP_10 r5, r3, r6 ; r5=dst, r3=stride, r6 presumably 3*stride — confirm
; 5 args per the prototype above; 7 GPRs, 8 xmm regs.
cglobal h264_idct_add16intra_10,5,7,8
; One ADD16_OP_INTRA per pair of 4x4 blocks; the second operand is the
; offset into the nnzc[6*8] table for that pair.
ADD16_OP_INTRA 0, 4+1*8
ADD16_OP_INTRA 2, 4+2*8
ADD16_OP_INTRA 4, 6+1*8
ADD16_OP_INTRA 6, 6+2*8
ADD16_OP_INTRA 8, 4+3*8
ADD16_OP_INTRA 10, 4+4*8
ADD16_OP_INTRA 12, 6+3*8
ADD16_OP_INTRA 14, 6+4*8
%if HAVE_AVX_EXTERNAL
%assign last_block 36 ; chroma block indices run 16..35 here — presumably 4:2:0; confirm
;-----------------------------------------------------------------------------
; void ff_h264_idct_add8_10(pixel **dst, const int *block_offset,
;                           int16_t *block, int stride,
;                           const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
cglobal h264_idct_add8_10,5,8,7 ; note: dst is pixel** (one pointer per plane)
ADD16_OP_INTRA 16, 4+ 6*8 ; first chroma plane (plane-switch code elided)
ADD16_OP_INTRA 18, 4+ 7*8
ADD16_OP_INTRA 32, 4+11*8 ; second chroma plane
ADD16_OP_INTRA 34, 4+12*8
%endmacro ; IDCT_ADD8
%if HAVE_AVX_EXTERNAL
353 ;-----------------------------------------------------------------------------
354 ; void ff_h264_idct_add8_422_10(pixel **dst, const int *block_offset,
355 ; int16_t *block, int stride,
356 ; const uint8_t nnzc[6*8])
357 ;-----------------------------------------------------------------------------
%assign last_block 44 ; 4:2:2 (per the prototype below) has extra chroma blocks per plane
; Emits h264_idct_add8_422_10 (4:2:2 chroma) for the current ISA.
%macro IDCT_ADD8_422 0
cglobal h264_idct_add8_422_10, 5, 8, 7 ; dst is pixel**; 8 GPRs, 7 xmm regs
ADD16_OP_INTRA 16, 4+ 6*8 ; first chroma plane (plane-switch code elided)
ADD16_OP_INTRA 18, 4+ 7*8
ADD16_OP_INTRA 24, 4+ 8*8 ; i+4
ADD16_OP_INTRA 26, 4+ 9*8 ; i+4
ADD16_OP_INTRA 32, 4+11*8 ; second chroma plane
ADD16_OP_INTRA 34, 4+12*8
ADD16_OP_INTRA 40, 4+13*8 ; i+4
ADD16_OP_INTRA 42, 4+14*8 ; i+4
%if HAVE_AVX_EXTERNAL
406 ;-----------------------------------------------------------------------------
407 ; void ff_h264_idct8_add_10(pixel *dst, int16_t *block, int stride)
408 ;-----------------------------------------------------------------------------
; NOTE(review): tail of an elided macro — SWAP renumbers the register
; aliases back to natural order after the 1-D transform's shuffling.
SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
; Full 8-point 1-D iDCT over coefficients at %1; the two operands shown
; are 64*2 bytes apart (remaining row loads are elided from this view).
%macro IDCT8_1D_FULL 1
IDCT8_1D [%1], [%1+ 64*2]
; %1=int16_t *block, %2=int16_t *dstblock
; Row pass of the SSE 8x8 path: transform (elided) then transpose the
; 8x8 dwords as 4x4 quadrants. The scratch register differs between the
; two transpose pairs (m8 vs m7/m3) — presumably alternate register-count
; paths with %if guards elided from this view; confirm.
%macro IDCT8_ADD_SSE_START 2
TRANSPOSE4x4D 0,1,2,3,8 ; quadrant in m0-m3, m8 scratch
TRANSPOSE4x4D 4,5,6,7,8 ; quadrant in m4-m7, m8 scratch
TRANSPOSE4x4D 0,1,2,3,7 ; alternate path: m7 scratch
TRANSPOSE4x4D 4,5,6,7,3 ; alternate path: m3 scratch
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; Final add/clip/store of 8 rows, two rows per STORE_DIFFx2.
%macro IDCT8_ADD_SSE_END 3
STORE_DIFFx2 m0, m1, m6, m7, %1, %3 ; rows 0-1 (dst advance between stores is elided)
STORE_DIFFx2 m2, m3, m6, m7, %1, %3 ; rows 2-3
STORE_DIFFx2 m4, m5, m6, m7, %1, %3 ; rows 4-5
STORE_DIFFx2 m0, m1, m6, m7, %1, %3 ; rows 6-7 — m0/m1 presumably reloaded in elided code; confirm
; void ff_h264_idct8_add_10(pixel *dst, int16_t *block, int stride)
; 16 xmm regs requested => this instantiation targets x86-64.
cglobal h264_idct8_add_10, 3,4,16
%assign pad 16-gprsize-(stack_offset&15) ; padding keeps rsp 16-aligned across the call
call h264_idct8_add1_10 %+ SUFFIX ; shared 8x8 body, reused by idct8_add4 below
; TODO: does not need to use stack
; Internal helper: one full 8x8 iDCT+add, using an rsp-based scratch
; buffer for the transpose between the two 1-D passes.
h264_idct8_add1_10 %+ SUFFIX:
%assign pad 256+16-gprsize ; 256-byte transpose buffer + alignment slack
IDCT8_ADD_SSE_START r1, rsp ; row pass, first half of coeffs -> rsp
IDCT8_ADD_SSE_START r1+16, rsp+128 ; row pass, second half -> rsp+128
PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,13, 10,14, 11,15, 13,5, 14,6, 15,7 ; renumber register aliases between passes
IDCT8_1D [rsp], [rsp+128] ; column pass, first column group
IDCT8_1D [rsp+16], [rsp+144] ; column pass, second column group
CLIPW m8, m0, [pw_pixel_max] ; clamp to [m0, 1023] — m0 presumably zero here; setup elided, confirm
mova m8, [pw_pixel_max] ; reload ceiling (1023) for the STORE_DIFF16 sequence
STORE_DIFF16 m9, m1, m0, m8, r0+r2 ; add row to dst, clip to [m0, m8], store
STORE_DIFF16 m10, m2, m0, m8, r0 ; (r0 advance between stores is elided)
STORE_DIFF16 m11, m3, m0, m8, r0+r2
STORE_DIFF16 m12, m4, m0, m8, r0
STORE_DIFF16 m13, m5, m0, m8, r0+r2
STORE_DIFF16 m14, m6, m0, m8, r0
STORE_DIFF16 m15, m7, m0, m8, r0+r2
; Alternate (fewer-register) path: round-trip through the stack buffer.
IDCT8_ADD_SSE_START r1, rsp
IDCT8_ADD_SSE_START r1+16, rsp+128
IDCT8_ADD_SSE_END r0, rsp, r2 ; first half of the 8x8
IDCT8_ADD_SSE_END r3, rsp+16, r2 ; second half; r3's value is set in elided code — confirm
%if HAVE_AVX_EXTERNAL
616 ;-----------------------------------------------------------------------------
617 ; void ff_h264_idct8_add4_10(pixel **dst, const int *block_offset,
618 ; int16_t *block, int stride,
619 ; const uint8_t nnzc[6*8])
620 ;-----------------------------------------------------------------------------
621 ;;;;;;; NO FATE SAMPLES TRIGGER THIS
; %1 = 8x8 block index, %2 = nnzc[] offset (the elided test decides
; whether to run the full transform for this block).
%macro IDCT8_ADD4_OP 2
call h264_idct8_add1_10 %+ SUFFIX ; shared 8x8 iDCT+add helper
; 0 named args (argument loads are in elided code), 7 GPRs, 16 xmm regs.
cglobal h264_idct8_add4_10, 0,7,16
%assign pad 16-gprsize-(stack_offset&15) ; keep rsp 16-aligned across the calls
IDCT8_ADD4_OP 0, 4+1*8 ; the four 8x8 luma blocks of a macroblock
IDCT8_ADD4_OP 4, 6+1*8
IDCT8_ADD4_OP 8, 4+3*8
IDCT8_ADD4_OP 12, 6+3*8
%endmacro ; IDCT8_ADD4
%if HAVE_AVX_EXTERNAL