;*****************************************************************************
;* dct-32.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr> (initial version)
;*          Loren Merritt <lorenm@u.washington.edu> (misc)
;*          Min Chen <chenm001@163.com> (converted to nasm)
;*          Christian Heine <sennindemokrit@gmx.net> (dct8/idct8 functions)
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*****************************************************************************
%include "x86util.asm"

; out: 0,4,6 in mem, rest in regs
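; DCT8_1D below is one 1-D pass of the 8x8 forward transform. In the comments,
; sNM/dNM denote the sum/difference of input rows N and M, a0-a7 are the
; intermediate butterfly terms, and dst0-dst7 are the output coefficients
; (dst0/2/4/6 come from the sums, dst1/3/5/7 from the differences).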
    SUMSUB_BA m%8, m%1 ; %8 = s07, %1 = d07
    SUMSUB_BA m%7, m%2 ; %7 = s16, %2 = d16
    SUMSUB_BA m%6, m%3 ; %6 = s25, %3 = d25
    SUMSUB_BA m%5, m%4 ; %5 = s34, %4 = d34
    SUMSUB_BA m%5, m%8 ; %5 = a0, %8 = a2
    SUMSUB_BA m%6, m%7 ; %6 = a1, %7 = a3
    SUMSUB_BA m%6, m%5 ; %6 = dst0, %5 = dst4
    paddw m%6, m%8 ; a2 + (a3>>1)
    psubw m%8, m%7 ; (a2>>1) - a3
    paddw m%5, m%3 ; d25+(d25>>1)
    psubw m%7, m%4 ; a5 = d07-d34-(d25+(d25>>1))
    paddw m%5, m%2 ; d16+(d16>>1)
    psubw m%8, m%5 ; a6 = d07+d34-(d16+(d16>>1))
    paddw m%5, m%1 ; d07+(d07>>1)
    paddw m%5, m%3 ; a4 = d16+d25+(d07+(d07>>1))
    paddw m%1, m%4 ; d34+(d34>>1)
    psubw m%1, m%3 ; a7 = d16-d25+(d34+(d34>>1))
    paddw m%4, m%5 ; a4 + (a7>>2)
    paddw m%3, m%7 ; a5 + (a6>>2)
    psubw m%5, m%1 ; (a4>>2) - a7
    psubw m%8, m%7 ; a6 - (a5>>2)
    SWAP %2, %4, %3, %6, %8, %5

; in: 0,4 in mem, rest in regs
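; IDCT8_1D is the corresponding 1-D inverse-transform pass, used by the
; idct8 functions below.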
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]

    DCT8_1D 0,1,2,3,4,5,6,7,r0
    SAVE_MM_PERMUTATION dct8_mmx

%macro SPILL_SHUFFLE 3-* ; ptr, list of regs, list of memory offsets
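    ; stores each listed register to its slot at [ptr + offset*16]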
    mova [%%base + %2*16], %%tmp

%macro UNSPILL_SHUFFLE 3-*
    mova %%tmp, [%%base + %2*16]
%macro SPILL 2+ ; assume offsets are the same as reg numbers
    SPILL_SHUFFLE %1, %2, %2

%macro UNSPILL 2+
    UNSPILL_SHUFFLE %1, %2, %2

;-----------------------------------------------------------------------------
; void x264_sub8x8_dct8_mmx( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
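; pix1 is FENC_STRIDE-strided, pix2 is FDEC_STRIDE-strided; dct receives the
; 8x8 forward transform of (pix1 - pix2)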
cglobal x264_sub8x8_dct8_mmx, 3,3
global x264_sub8x8_dct8_mmx %+ .skip_prologue
    call load_diff_4x8_mmx
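    ; mmx registers hold only four 16-bit coefficients, so the block is handled
    ; as two 4x8 halves and transposed one 4x4 quadrant at a time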
    TRANSPOSE4x4W 0,1,2,3,4
    TRANSPOSE4x4W 4,5,6,7,0
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 4,5,6,7,0
    TRANSPOSE4x4W 0,1,2,3,5
    SPILL_SHUFFLE r0, 0,1,2,3, 4,5,6,7
    movq mm4, m6 ; relies on the register permutation to avoid conflicts
    UNSPILL r0+8, 4,5,6,7
    SPILL r0+8, 1,2,3,5,7
    UNSPILL r0, 0,1,2,3,4,5,6,7

    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SAVE_MM_PERMUTATION idct8_mmx

%macro ADD_STORE_ROW 3
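    ; %1 = destination row, %2/%3 = the two 4-sample halves of that idct row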
    movq m1, [r0+%1*FDEC_STRIDE]
    movq [r0+%1*FDEC_STRIDE], m1

;-----------------------------------------------------------------------------
; void x264_add8x8_idct8_mmx( uint8_t *dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
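; adds the 8x8 inverse transform of dct to the FDEC_STRIDE-strided block at dst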
cglobal x264_add8x8_idct8_mmx, 2,2
global x264_add8x8_idct8_mmx %+ .skip_prologue
    UNSPILL r1, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0
    UNSPILL r1+8, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0

    ; memory layout at this time:
    UNSPILL_SHUFFLE r1, 1,2,3, 5,6,7
    movq [r1+0x08], m0 ; mm4
    movq [r1+0x48], m4 ; mm5
    movq [r1+0x58], m5 ; mm0
    movq [r1+0x68], m6 ; mm2
    movq [r1+0x78], m7 ; mm6
    movq [r1+0x18], m1 ; mm1
    movq [r1+0x28], m2 ; mm7
    movq [r1+0x38], m3 ; mm3
    ADD_STORE_ROW 0, [r1+0x00], [r1+0x08]
    ADD_STORE_ROW 1, [r1+0x10], [r1+0x18]
    ADD_STORE_ROW 2, [r1+0x20], [r1+0x28]
    ADD_STORE_ROW 3, m3, [r1+0x38]
    ADD_STORE_ROW 4, m4, [r1+0x48]
    ADD_STORE_ROW 5, m5, [r1+0x58]
    ADD_STORE_ROW 6, m6, [r1+0x68]
    ADD_STORE_ROW 7, m7, [r1+0x78]

;-----------------------------------------------------------------------------
; void x264_sub8x8_dct8_sse2( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
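; same operation as the mmx version above, but each xmm register holds a full
; 8-coefficient row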
cglobal x264_sub8x8_dct8_sse2, 3,3
global x264_sub8x8_dct8_sse2 %+ .skip_prologue
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]

    DCT8_1D 0,1,2,3,4,5,6,7,r0
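    ; with only 8 xmm registers on x86-32, the 8x8 word transpose uses
    ; [r0+0x60] and [r0+0x40] as memory scratch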
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,[r0+0x60],[r0+0x40],1
    DCT8_1D 0,1,2,3,4,5,6,7,r0

;-----------------------------------------------------------------------------
; void x264_add8x8_idct8_sse2( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal x264_add8x8_idct8_sse2, 2,2
global x264_add8x8_idct8_sse2 %+ .skip_prologue
    UNSPILL r1, 1,2,3,5,6,7
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,[r1+0x60],[r1+0x40],1
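    ; add the rounding bias once before the second pass; it pairs with the
    ; final >>6 shift done in STORE_DIFF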
    paddw m0, [pw_32 GLOBAL]
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    STORE_DIFF m0, m6, m7, [r0+FDEC_STRIDE*0]
    STORE_DIFF m1, m6, m7, [r0+FDEC_STRIDE*1]
    STORE_DIFF m2, m6, m7, [r0+FDEC_STRIDE*2]
    STORE_DIFF m3, m6, m7, [r0+FDEC_STRIDE*3]
    STORE_DIFF m4, m6, m7, [r0+FDEC_STRIDE*4]
    STORE_DIFF m5, m6, m7, [r0+FDEC_STRIDE*5]
    UNSPILL_SHUFFLE r1, 0,1, 6,7
    STORE_DIFF m0, m6, m7, [r0+FDEC_STRIDE*6]
    STORE_DIFF m1, m6, m7, [r0+FDEC_STRIDE*7]