;*****************************************************************************
;* dct-32.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr> (initial version)
;*          Loren Merritt <lorenm@u.washington.edu> (misc)
;*          Min Chen <chenm001@163.com> (converted to nasm)
;*          Christian Heine <sennindemokrit@gmx.net> (dct8/idct8 functions)
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*****************************************************************************
%macro TRANSPOSE4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SWAP %2, %3
%endmacro
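; (each SBUTTERFLY interleaves one register pair; two word-level passes
; followed by two dword-level passes leave the 4x4 block of words transposed,
; and the final SWAP restores row order)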
%macro STORE_DIFF_8P 4 ; coefs, dst, tmp, zero
    psraw       %1, 6
    movq        %3, %2  ; load 8 dst pixels
    punpcklbw   %3, %4  ; bytes -> words
    paddsw      %1, %3  ; add the residual
    packuswb    %1, %1  ; clip to 0..255
    movq        %2, %1
%endmacro
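; Roughly, in scalar C (a sketch of the semantics, clip_uint8 clamps to 0..255):
;   for( int x = 0; x < 8; x++ )
;       dst[x] = clip_uint8( dst[x] + (coef[x] >> 6) );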
; out: 0,4,6 in mem, rest in regs
%macro DCT8_1D 9
    SUMSUB_BA  m%8, m%1 ; %8 = s07, %1 = d07
    SUMSUB_BA  m%7, m%2 ; %7 = s16, %2 = d16
    SUMSUB_BA  m%6, m%3 ; %6 = s25, %3 = d25
    SUMSUB_BA  m%5, m%4 ; %5 = s34, %4 = d34
    SUMSUB_BA  m%5, m%8 ; %5 = a0, %8 = a2
    SUMSUB_BA  m%6, m%7 ; %6 = a1, %7 = a3
    SUMSUB_BA  m%6, m%5 ; %6 = dst0, %5 = dst4
    paddw m%6, m%8 ; a2 + (a3>>1)
    psubw m%8, m%7 ; (a2>>1) - a3
    paddw m%5, m%3 ; d25+(d25>>1)
    psubw m%7, m%4 ; a5 = d07-d34-(d25+(d25>>1))
    paddw m%5, m%2 ; d16+(d16>>1)
    psubw m%8, m%5 ; a6 = d07+d34-(d16+(d16>>1))
    paddw m%5, m%1 ; d07+(d07>>1)
    paddw m%5, m%3 ; a4 = d16+d25+(d07+(d07>>1))
    paddw m%1, m%4 ; d34+(d34>>1)
    psubw m%1, m%3 ; a7 = d16-d25+(d34+(d34>>1))
    paddw m%4, m%5 ; a4 + (a7>>2)
    paddw m%3, m%7 ; a5 + (a6>>2)
    psubw m%5, m%1 ; (a4>>2) - a7
    psubw m%8, m%7 ; a6 - (a5>>2)
    SWAP %2, %4, %3, %6, %8, %5
%endmacro
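; For reference, the same 1-D butterfly as a scalar C sketch (variable names
; follow the comments above; the >>1 and >>2 terms are done with psraw on
; register copies):
;   void dct8_1d( int16_t dst[8], const int16_t src[8] )
;   {
;       int s07 = src[0]+src[7], d07 = src[0]-src[7];
;       int s16 = src[1]+src[6], d16 = src[1]-src[6];
;       int s25 = src[2]+src[5], d25 = src[2]-src[5];
;       int s34 = src[3]+src[4], d34 = src[3]-src[4];
;       int a0 = s07+s34, a2 = s07-s34;
;       int a1 = s16+s25, a3 = s16-s25;
;       int a4 = d16+d25 + (d07+(d07>>1));
;       int a5 = d07-d34 - (d25+(d25>>1));
;       int a6 = d07+d34 - (d16+(d16>>1));
;       int a7 = d16-d25 + (d34+(d34>>1));
;       dst[0] = a0+a1;       dst[4] = a0-a1;
;       dst[2] = a2+(a3>>1);  dst[6] = (a2>>1)-a3;
;       dst[1] = a4+(a7>>2);  dst[7] = (a4>>2)-a7;
;       dst[3] = a5+(a6>>2);  dst[5] = a6-(a5>>2);
;   }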
; in: 0,4 in mem, rest in regs
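; For reference, the matching 1-D inverse butterfly as a scalar C sketch
; (following the H.264 8x8 inverse transform):
;   void idct8_1d( int16_t dst[8], const int16_t src[8] )
;   {
;       int a0 =  src[0] + src[4],      a2 = src[0] - src[4];
;       int a4 = (src[2]>>1) - src[6],  a6 = (src[6]>>1) + src[2];
;       int a1 = -src[3] + src[5] - src[7] - (src[7]>>1);
;       int a3 =  src[1] + src[7] - src[3] - (src[3]>>1);
;       int a5 = -src[1] + src[7] + src[5] + (src[5]>>1);
;       int a7 =  src[3] + src[5] + src[1] + (src[1]>>1);
;       int b0 = a0 + a6,       b6 = a0 - a6;
;       int b2 = a2 + a4,       b4 = a2 - a4;
;       int b1 = (a7>>2) + a1,  b7 = a7 - (a1>>2);
;       int b3 = a3 + (a5>>2),  b5 = (a3>>2) - a5;
;       dst[0] = b0 + b7;  dst[7] = b0 - b7;
;       dst[1] = b2 + b5;  dst[6] = b2 - b5;
;       dst[2] = b4 + b3;  dst[5] = b4 - b3;
;       dst[3] = b6 + b1;  dst[4] = b6 - b1;
;   }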
load_diff_4x8_mmx:
    LOAD_DIFF_8P m0, m7, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF_8P m1, m7, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF_8P m2, m7, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF_8P m3, m7, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF_8P m4, m7, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF_8P m5, m7, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    movq  [r0], m0 ; spill row 0: the last row uses m0 as scratch
    LOAD_DIFF_8P m6, m7, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF_8P m7, m0, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    movq  m0, [r0]
    ret
dct8_mmx:
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    SAVE_MM_PERMUTATION dct8_mmx
    ret
%macro SPILL_SHUFFLE 3-* ; ptr, list of regs, list of memory offsets
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova [%%base + %2*16], %%tmp
    %rotate 1-%0/2
    %endrep
%endmacro

%macro UNSPILL_SHUFFLE 3-*
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova %%tmp, [%%base + %2*16]
    %rotate 1-%0/2
    %endrep
%endmacro
%macro SPILL 2+ ; assume offsets are the same as reg numbers
    SPILL_SHUFFLE %1, %2, %2
%endmacro

%macro UNSPILL 2+
    UNSPILL_SHUFFLE %1, %2, %2
%endmacro
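; e.g. SPILL r0, 1,2 stores m1 -> [r0+0x10] and m2 -> [r0+0x20]: each
; register's slot sits reg*16 bytes into the 64-coefficient int16_t block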
;-----------------------------------------------------------------------------
; void x264_sub8x8_dct8_mmx( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
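; Computes dct = the 2-D DCT8 of the residual pix1 - pix2 (pix1 strided by
; FENC_STRIDE, pix2 by FDEC_STRIDE): one 1-D pass per dimension, with 4x4
; word transposes between the column and row passes.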
cglobal x264_sub8x8_dct8_mmx, 3,3
global x264_sub8x8_dct8_mmx %+ .skip_prologue
.skip_prologue:
    INIT_MMX
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 0,1,2,3,4
    TRANSPOSE4x4W 4,5,6,7,0
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 4,5,6,7,0
    TRANSPOSE4x4W 0,1,2,3,5
    SPILL_SHUFFLE r0, 0,1,2,3, 4,5,6,7
    movq mm4, m6 ; relies on the current register permutation to avoid a conflict
    UNSPILL r0+8, 4,5,6,7
    SPILL r0+8, 1,2,3,5,7
    UNSPILL r0, 0,1,2,3,4,5,6,7
idct8_mmx:
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SAVE_MM_PERMUTATION idct8_mmx
    ret
%macro ADD_STORE_ROW 3 ; row, low words, high words (m0 must be zero)
    movq        m1, [r0+%1*FDEC_STRIDE]
    movq        m2, m1
    punpcklbw   m1, m0
    punpckhbw   m2, m0
    paddw       m1, %2
    paddw       m2, %3
    packuswb    m1, m2
    movq  [r0+%1*FDEC_STRIDE], m1
%endmacro
;-----------------------------------------------------------------------------
; void x264_add8x8_idct8_mmx( uint8_t *dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
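; Computes dst[x] = clip_uint8( dst[x] + ((idct2d(dct)[x] + 32) >> 6) ):
; the inverse of sub8x8_dct8, with a +32 bias so the >>6 rounds to nearest.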
cglobal x264_add8x8_idct8_mmx, 2,2
global x264_add8x8_idct8_mmx %+ .skip_prologue
.skip_prologue:
    INIT_MMX
    UNSPILL r1, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0
    UNSPILL r1+8, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0
    ; memory layout at this time:
    UNSPILL_SHUFFLE r1, 1,2,3, 5,6,7
    movq [r1+0x08], m0 ; mm4
    movq [r1+0x48], m4 ; mm5
    movq [r1+0x58], m5 ; mm0
    movq [r1+0x68], m6 ; mm2
    movq [r1+0x78], m7 ; mm6
    movq [r1+0x18], m1 ; mm1
    movq [r1+0x28], m2 ; mm7
    movq [r1+0x38], m3 ; mm3
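    ; (the mmN comments above name the physical register holding each row
    ; under the current SWAP permutation)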
    ADD_STORE_ROW 0, [r1+0x00], [r1+0x08]
    ADD_STORE_ROW 1, [r1+0x10], [r1+0x18]
    ADD_STORE_ROW 2, [r1+0x20], [r1+0x28]
    ADD_STORE_ROW 3, m3, [r1+0x38]
    ADD_STORE_ROW 4, m4, [r1+0x48]
    ADD_STORE_ROW 5, m5, [r1+0x58]
    ADD_STORE_ROW 6, m6, [r1+0x68]
    ADD_STORE_ROW 7, m7, [r1+0x78]
    ret
; in: m0..m7, except m6 which is in [%9+0x60]
; out: m0..m7, except m4 which is in [%9+0x40]
%macro TRANSPOSE8x8W 9
    SBUTTERFLY wd, %1, %2, %7
    movdqa m%7, [%9+0x60]
    SBUTTERFLY wd, %3, %4, %2
    SBUTTERFLY wd, %5, %6, %2
    SBUTTERFLY wd, %7, %8, %2
    SBUTTERFLY dq, %1, %3, %2
    SBUTTERFLY dq, %2, %4, %3
    SBUTTERFLY dq, %5, %7, %3
    SBUTTERFLY dq, %6, %8, %3
    SBUTTERFLY qdq, %1, %5, %3
    SBUTTERFLY qdq, %2, %6, %3
    movdqa [%9+0x40], m%2
    SBUTTERFLY qdq, %3, %7, %2
    SBUTTERFLY qdq, %4, %8, %2
;-----------------------------------------------------------------------------
; void x264_sub8x8_dct8_sse2( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
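; Same transform as the mmx version, but each xmm register holds a full
; 8-coefficient row, so the 2-D DCT is a column pass, one 8x8 transpose, and
; a row pass, with r0 doubling as spill space.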
cglobal x264_sub8x8_dct8_sse2, 3,3
global x264_sub8x8_dct8_sse2 %+ .skip_prologue
.skip_prologue:
    LOAD_DIFF_8P m0, m7, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF_8P m1, m7, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF_8P m2, m7, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF_8P m3, m7, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF_8P m4, m7, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF_8P m5, m7, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    movdqa [r0], m0 ; spill row 0: the last row uses m0 as scratch
    LOAD_DIFF_8P m6, m7, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF_8P m7, m0, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    movdqa m0, [r0]
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,r0
    DCT8_1D 0,1,2,3,4,5,6,7,r0
;-----------------------------------------------------------------------------
; void x264_add8x8_idct8_sse2( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
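; Mirrors the dct: column IDCT, 8x8 transpose, +32 rounding bias, row IDCT,
; then STORE_DIFF_8P adds the (>>6)-scaled result to p_dst with saturation.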
cglobal x264_add8x8_idct8_sse2, 2,2
global x264_add8x8_idct8_sse2 %+ .skip_prologue
.skip_prologue:
    UNSPILL r1, 1,2,3,5,6,7
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,r1
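    ; adding 32 to the DC row here biases every output of the second 1-D pass
    ; by +32 (the DC basis contributes equally to all samples), so the >>6 in
    ; STORE_DIFF_8P rounds to nearest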
    paddw m0, [pw_32 GLOBAL]
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    STORE_DIFF_8P m0, [r0+FDEC_STRIDE*0], m6, m7
    STORE_DIFF_8P m1, [r0+FDEC_STRIDE*1], m6, m7
    STORE_DIFF_8P m2, [r0+FDEC_STRIDE*2], m6, m7
    STORE_DIFF_8P m3, [r0+FDEC_STRIDE*3], m6, m7
    STORE_DIFF_8P m4, [r0+FDEC_STRIDE*4], m6, m7
    STORE_DIFF_8P m5, [r0+FDEC_STRIDE*5], m6, m7
    UNSPILL_SHUFFLE r1, 0,1, 6,7
    STORE_DIFF_8P m0, [r0+FDEC_STRIDE*6], m6, m7
    STORE_DIFF_8P m1, [r0+FDEC_STRIDE*7], m6, m7
    ret