1 ;*****************************************************************************
2 ;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
3 ;*****************************************************************************
4 ;* Copyright (C) 2005-2011 x264 project
6 ;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
8 ;* This file is part of Libav.
10 ;* Libav is free software; you can redistribute it and/or
11 ;* modify it under the terms of the GNU Lesser General Public
12 ;* License as published by the Free Software Foundation; either
13 ;* version 2.1 of the License, or (at your option) any later version.
15 ;* Libav is distributed in the hope that it will be useful,
16 ;* but WITHOUT ANY WARRANTY; without even the implied warranty of
17 ;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 ;* Lesser General Public License for more details.
20 ;* You should have received a copy of the GNU Lesser General Public
21 ;* License along with Libav; if not, write to the Free Software
22 ;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 ;******************************************************************************
25 %include "libavutil/x86/x86inc.asm"
26 %include "libavutil/x86/x86util.asm"
30 pw_pixel_max: times 8 dw ((1 << 10)-1) ; 1023 = max 10-bit sample; 8 words fill one XMM reg for CLIPW
; scan8_mem: per-4x4-block positions (encoded as x + y*8) in the nnzc /
; non-zero-count cache consulted by the *_add16 / *_add8 entry points below.
; The first 16 entries cover the luma 4x4 blocks; the later rows (y = 6..14)
; presumably cover the chroma planes -- TODO confirm against the C-side
; scan8 table this must mirror.
32 scan8_mem: db 4+ 1*8, 5+ 1*8, 4+ 2*8, 5+ 2*8
33 db 6+ 1*8, 7+ 1*8, 6+ 2*8, 7+ 2*8
34 db 4+ 3*8, 5+ 3*8, 4+ 4*8, 5+ 4*8
35 db 6+ 3*8, 7+ 3*8, 6+ 4*8, 7+ 4*8
36 db 4+ 6*8, 5+ 6*8, 4+ 7*8, 5+ 7*8
37 db 6+ 6*8, 7+ 6*8, 6+ 7*8, 7+ 7*8
38 db 4+ 8*8, 5+ 8*8, 4+ 9*8, 5+ 9*8
39 db 6+ 8*8, 7+ 8*8, 6+ 9*8, 7+ 9*8
40 db 4+11*8, 5+11*8, 4+12*8, 5+12*8
41 db 6+11*8, 7+11*8, 6+12*8, 7+12*8
42 db 4+13*8, 5+13*8, 4+14*8, 5+14*8
43 db 6+13*8, 7+13*8, 6+14*8, 7+14*8
; alias used by the ADD16_OP* macros when indexing the table
48 %define scan8 scan8_mem
53 ;-----------------------------------------------------------------------------
54 ; void h264_idct_add(pixel *dst, dctcoef *block, int stride)
55 ;-----------------------------------------------------------------------------
; 4x4 inverse transform + add for 10-bit pixels:
; 1-D row pass -> 4x4 dword transpose -> 1-D column pass, then the residual
; is added to dst and clipped to [0, 1023] (CLIPW with pw_pixel_max).
; %1 = dst pointer, %3 = stride (other macro args are in elided lines).
63 CLIPW %1, %4, [pw_pixel_max] ; clamp result to the 10-bit pixel range
83 IDCT4_1D d,0,1,2,3,4,5 ; row pass (dword precision); m0-m3 = data, m4/m5 = scratch
84 TRANSPOSE4x4D 0,1,2,3,4 ; transpose so the second pass operates on columns
86 IDCT4_1D d,0,1,2,3,4,5 ; column pass
88 STORE_DIFFx2 m0, m1, m4, m5, %1, %3 ; rows 0-1: add residual to dst, clip, store
90 STORE_DIFFx2 m2, m3, m4, m5, %1, %3 ; rows 2-3
; public entry point: dst=r0, block=r1, stride=r2 (3 args, 3 GPRs)
94 cglobal h264_idct_add_10_%1, 3,3
95 IDCT4_ADD_10 r0, r1, r2
106 ;-----------------------------------------------------------------------------
107 ; h264_idct_add16(pixel *dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
108 ;-----------------------------------------------------------------------------
109 ;;;;;;; NO FATE SAMPLES TRIGGER THIS
; Per-block worker for add16: the same 4x4 IDCT pipeline as IDCT4_ADD_10,
; but with the per-block destination pointer in r5 and the stride in r3
; (both presumably set up by the elided ADD16_OP dispatch code).
117 IDCT4_1D d,0,1,2,3,4,5 ; row pass
118 TRANSPOSE4x4D 0,1,2,3,4 ; rows -> columns
120 IDCT4_1D d,0,1,2,3,4,5 ; column pass
122 STORE_DIFFx2 m0, m1, m4, m5, r5, r3 ; rows 0-1 -> dst, clipped to 10 bits
124 STORE_DIFFx2 m2, m3, m4, m5, r5, r3 ; rows 2-3
148 %macro IDCT_ADD16_10 1
; h264_idct_add16: run the 4x4 IDCT+add over all 16 luma 4x4 blocks.
; ADD16_OP args: (cpu suffix, block index, scan8 position of the block in
; the nnzc table).  The positions mirror the first 16 scan8_mem entries;
; ADD16_OP's elided body presumably tests nnzc and skips uncoded blocks.
149 cglobal h264_idct_add16_10_%1, 5,6
150 ADD16_OP %1, 0, 4+1*8
151 ADD16_OP %1, 1, 5+1*8
152 ADD16_OP %1, 2, 4+2*8
153 ADD16_OP %1, 3, 5+2*8
154 ADD16_OP %1, 4, 6+1*8
155 ADD16_OP %1, 5, 7+1*8
156 ADD16_OP %1, 6, 6+2*8
157 ADD16_OP %1, 7, 7+2*8
158 ADD16_OP %1, 8, 4+3*8
159 ADD16_OP %1, 9, 5+3*8
160 ADD16_OP %1, 10, 4+4*8
161 ADD16_OP %1, 11, 5+4*8
162 ADD16_OP %1, 12, 6+3*8
163 ADD16_OP %1, 13, 7+3*8
164 ADD16_OP %1, 14, 6+4*8
165 ADD16_OP %1, 15, 7+4*8
176 ;-----------------------------------------------------------------------------
177 ; void h264_idct_dc_add(pixel *dst, dctcoef *block, int stride)
178 ;-----------------------------------------------------------------------------
; Add a broadcast DC value (held in m0) to four rows of pixels and clip.
; %1 = dst, %2 = stride, %3 = presumably 3*stride computed in elided setup
; code -- TODO confirm.  m6 must already hold pw_pixel_max for the
; (elided) clip step.  The 3-operand paddw forms are provided by the
; x86util AVX-emulation macros on pre-AVX targets.
179 %macro IDCT_DC_ADD_OP_10 3
182 paddw m1, m0, [%1+0 ] ; row 0 + DC
183 paddw m2, m0, [%1+%2 ] ; row 1 + DC
184 paddw m3, m0, [%1+%2*2] ; row 2 + DC
185 paddw m4, m0, [%1+%3 ] ; row 3 + DC
; entry: dst=r0, block=r1, stride=r2.  The macro is invoked as
; (r0, r2, r1), so r1 is presumably overwritten with 3*stride in the
; elided lines between the prologue and this call -- verify.
207 cglobal h264_idct_dc_add_10_mmx2,3,3
213 mova m6, [pw_pixel_max] ; clip bound consumed by IDCT_DC_ADD_OP_10
214 IDCT_DC_ADD_OP_10 r0, r2, r1
217 ;-----------------------------------------------------------------------------
218 ; void h264_idct8_dc_add(pixel *dst, dctcoef *block, int stride)
219 ;-----------------------------------------------------------------------------
; 8x8 DC-only add: two four-row passes of IDCT_DC_ADD_OP_10 cover the
; eight rows; r0 is presumably advanced by 4*stride between the two calls
; in the elided line(s) -- confirm against the full source.
220 %macro IDCT8_DC_ADD 1
221 cglobal h264_idct8_dc_add_10_%1,3,3,7
228 mova m6, [pw_pixel_max] ; clip bound for both passes
229 IDCT_DC_ADD_OP_10 r0, r2, r1 ; rows 0-3
231 IDCT_DC_ADD_OP_10 r0, r2, r1 ; rows 4-7
242 ;-----------------------------------------------------------------------------
243 ; h264_idct_add16intra(pixel *dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
244 ;-----------------------------------------------------------------------------
; Intra variant: a block with no coded AC coefficients may still carry a
; DC value, so each step loads per-block state and (in elided code)
; dispatches to either the full 4x4 IDCT or the DC-only add.
247 mov r5d, [r1+(%2+0)*4] ; r5 = block_offset[%2] (dst offset of first block)
249 mov r5d, [r1+(%2+1)*4] ; r5 = block_offset[%2+1] (second block of the pair)
256 %assign last_block 16
; ADD16_OP_INTRA args: (cpu suffix, even block index, scan8 position);
; judging by the call sites below it handles two adjacent 4x4 blocks
; per invocation.
257 %macro ADD16_OP_INTRA 3
263 mov r5d, [r1+(%2+0)*4] ; r5 = block_offset[%2]
272 %macro IDCT_ADD16INTRA_10 1
; DC-only path: r5 = dst, r3 = stride, r6 presumably 3*stride -- confirm
282 mova m6, [pw_pixel_max]
283 IDCT_DC_ADD_OP_10 r5, r3, r6
; entry: 5 args, 7 GPRs, 8 XMM regs
286 cglobal h264_idct_add16intra_10_%1,5,7,8
287 ADD16_OP_INTRA %1, 0, 4+1*8
288 ADD16_OP_INTRA %1, 2, 4+2*8
289 ADD16_OP_INTRA %1, 4, 6+1*8
290 ADD16_OP_INTRA %1, 6, 6+2*8
291 ADD16_OP_INTRA %1, 8, 4+3*8
292 ADD16_OP_INTRA %1, 10, 4+4*8
293 ADD16_OP_INTRA %1, 12, 6+3*8
294 ADD16_OP_INTRA %1, 14, 6+4*8
; instantiate the SSE2 and AVX flavours
307 IDCT_ADD16INTRA_10 sse2
310 IDCT_ADD16INTRA_10 avx
313 %assign last_block 36
314 ;-----------------------------------------------------------------------------
315 ; h264_idct_add8(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
316 ;-----------------------------------------------------------------------------
; Chroma add.  Note dst is pixel** (an array of plane pointers) here,
; unlike the luma entry points.  Reuses ADD16_OP_INTRA with the chroma
; block indices / scan8 rows.
318 cglobal h264_idct_add8_10_%1,5,7
324 ADD16_OP_INTRA %1, 16, 4+ 6*8 ; first chroma plane, blocks 16-19
325 ADD16_OP_INTRA %1, 18, 4+ 7*8
; r10+gprsize presumably points at the saved dst array: load the second
; plane's base pointer -- TODO confirm r10's contents in the elided setup
328 mov r0, [r10+gprsize]
333 ADD16_OP_INTRA %1, 32, 4+11*8 ; second chroma plane, blocks 32-35
334 ADD16_OP_INTRA %1, 34, 4+12*8
341 %endmacro ; IDCT_ADD8
350 ;-----------------------------------------------------------------------------
351 ; void h264_idct8_add(pixel *dst, dctcoef *block, int stride)
352 ;-----------------------------------------------------------------------------
398 SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567 (restore natural row order)
; 8-point 1-D transform over a full 8x8 half held partly in memory;
; the second operand is 64 coefficients (64*2 bytes) past the first.
401 %macro IDCT8_1D_FULL 1
408 IDCT8_1D [%1], [%1+ 64*2]
411 ; %1=int16_t *block, %2=int16_t *dstblock
; Row pass + 4x4 dword transposes, results spilled to %2.  The scratch
; register for each TRANSPOSE4x4D differs (8 vs 7 vs 3) depending on
; which mmregs are free at that point.
412 %macro IDCT8_ADD_SSE_START 2
415 TRANSPOSE4x4D 0,1,2,3,8
417 TRANSPOSE4x4D 4,5,6,7,8
421 TRANSPOSE4x4D 0,1,2,3,7
427 TRANSPOSE4x4D 4,5,6,7,3
435 ; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
; Column-pass results added to dst two rows at a time, clipped to 10 bits.
436 %macro IDCT8_ADD_SSE_END 3
442 STORE_DIFFx2 m0, m1, m6, m7, %1, %3
444 STORE_DIFFx2 m2, m3, m6, m7, %1, %3
448 STORE_DIFFx2 m4, m5, m6, m7, %1, %3
450 STORE_DIFFx2 m0, m1, m6, m7, %1, %3
; public entry: dst=r0, block=r1, stride=r2; declares 16 XMM regs, so the
; m8-m15 path below is the x86-64 variant.
454 cglobal h264_idct8_add_10_%1, 3,4,16
456 %assign pad 16-gprsize-(stack_offset&15) ; keep the stack 16-byte aligned for mova spills
458 call h264_idct8_add1_10_%1
464 ; TODO: does not need to use stack
; Internal helper (not a cglobal symbol): one full 8x8 IDCT + add.
; Shared with h264_idct8_add4 below via call.
465 h264_idct8_add1_10_%1:
466 %assign pad 256+16-gprsize ; 256 bytes of scratch -- enough for an 8x8 dword block
471 IDCT8_ADD_SSE_START r1, rsp ; left half: row pass + transpose into scratch
478 IDCT8_ADD_SSE_START r1+16, rsp+128 ; right half
; renumber mmregs so both transposed halves line up for the column pass
479 PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,13, 10,14, 11,15, 13,5, 14,6, 15,7
480 IDCT8_1D [rsp], [rsp+128] ; column pass, first quadrants
489 IDCT8_1D [rsp+16], [rsp+144] ; column pass, remaining quadrants
495 CLIPW m8, m0, [pw_pixel_max]
497 mova m8, [pw_pixel_max] ; clip bound reused by every STORE_DIFF16 below
498 STORE_DIFF16 m9, m1, m0, m8, r0+r2 ; add+clip one full 8-pixel (16-bit) row
500 STORE_DIFF16 m10, m2, m0, m8, r0 ; r0 is presumably advanced by 2*stride between pairs -- elided
501 STORE_DIFF16 m11, m3, m0, m8, r0+r2
503 STORE_DIFF16 m12, m4, m0, m8, r0
504 STORE_DIFF16 m13, m5, m0, m8, r0+r2
506 STORE_DIFF16 m14, m6, m0, m8, r0
507 STORE_DIFF16 m15, m7, m0, m8, r0+r2
; fallback path (presumably x86-32 / 8-XMM): both passes spill through
; the stack and IDCT8_ADD_SSE_END does the add+clip+store
509 IDCT8_ADD_SSE_START r1, rsp
510 IDCT8_ADD_SSE_START r1+16, rsp+128
512 IDCT8_ADD_SSE_END r0, rsp, r2
513 IDCT8_ADD_SSE_END r3, rsp+16, r2
527 ;-----------------------------------------------------------------------------
528 ; h264_idct8_add4(pixel **dst, const int *block_offset, dctcoef *block, int stride, const uint8_t nnzc[6*8])
529 ;-----------------------------------------------------------------------------
530 ;;;;;;; NO FATE SAMPLES TRIGGER THIS
; Per-8x8-block dispatcher: args are (cpu suffix, block index, scan8
; position in nnzc); calls the shared h264_idct8_add1 helper for each
; coded 8x8 block (the nnzc test itself is in elided lines).
531 %macro IDCT8_ADD4_OP 3
536 call h264_idct8_add1_10_%1
; entry: 0 fixed register args (loaded manually in elided code), 7 GPRs,
; 16 XMM regs
544 cglobal h264_idct8_add4_10_%1, 0,7,16
545 %assign pad 16-gprsize-(stack_offset&15) ; align stack for the helper's aligned spills
; the four 8x8 luma blocks = 4x4-block indices 0, 4, 8, 12
552 IDCT8_ADD4_OP %1, 0, 4+1*8
553 IDCT8_ADD4_OP %1, 4, 6+1*8
554 IDCT8_ADD4_OP %1, 8, 4+3*8
555 IDCT8_ADD4_OP %1, 12, 6+3*8
558 %endmacro ; IDCT8_ADD4