;*****************************************************************************
;* dct-32.asm: h264 encoder library
;*****************************************************************************
;* Copyright (C) 2003-2008 x264 project
;*
;* Authors: Laurent Aimar <fenrir@via.ecp.fr> (initial version)
;*          Loren Merritt <lorenm@u.washington.edu> (misc)
;*          Min Chen <chenm001@163.com> (converted to nasm)
;*          Christian Heine <sennindemokrit@gmx.net> (dct8/idct8 functions)
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
;*****************************************************************************
%include "x86util.asm"
%macro TRANSPOSE4x4W 5
    SBUTTERFLY wd, %1, %2, %5
    SBUTTERFLY wd, %3, %4, %5
    SBUTTERFLY dq, %1, %3, %5
    SBUTTERFLY dq, %2, %4, %5
    SWAP %2, %3
%endmacro
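
; Illustration (not from the original source): SBUTTERFLY a,b leaves the
; low-half interleave of a,b in a and the high-half interleave in b, so for
; four word rows the two passes give
;   %1 = a0 a1 a2 a3        a0 b0 a1 b1        a0 b0 c0 d0
;   %2 = b0 b1 b2 b3  -wd-> a2 b2 a3 b3  -dq-> a2 b2 c2 d2
;   %3 = c0 c1 c2 c3        c0 d0 c1 d1        a1 b1 c1 d1
;   %4 = d0 d1 d2 d3        c2 d2 c3 d3        a3 b3 c3 d3
; and the final SWAP %2, %3 restores row order.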
; out: 0,4,6 in mem, rest in regs
%macro DCT8_1D 9
    SUMSUB_BA m%8, m%1 ; %8 = s07, %1 = d07
    SUMSUB_BA m%7, m%2 ; %7 = s16, %2 = d16
    SUMSUB_BA m%6, m%3 ; %6 = s25, %3 = d25
    SUMSUB_BA m%5, m%4 ; %5 = s34, %4 = d34
    SUMSUB_BA m%5, m%8 ; %5 = a0, %8 = a2
    SUMSUB_BA m%6, m%7 ; %6 = a1, %7 = a3
    SUMSUB_BA m%6, m%5 ; %6 = dst0, %5 = dst4
    paddw m%6, m%8 ; a2 + (a3>>1)
    psubw m%8, m%7 ; (a2>>1) - a3
    paddw m%5, m%3 ; d25 + (d25>>1)
    psubw m%7, m%4 ; a5 = d07 - d34 - (d25 + (d25>>1))
    paddw m%5, m%2 ; d16 + (d16>>1)
    psubw m%8, m%5 ; a6 = d07 + d34 - (d16 + (d16>>1))
    paddw m%5, m%1 ; d07 + (d07>>1)
    paddw m%5, m%3 ; a4 = d16 + d25 + (d07 + (d07>>1))
    paddw m%1, m%4 ; d34 + (d34>>1)
    psubw m%1, m%3 ; a7 = d16 - d25 + (d34 + (d34>>1))
    paddw m%4, m%5 ; a4 + (a7>>2)
    paddw m%3, m%7 ; a5 + (a6>>2)
    psubw m%5, m%1 ; (a4>>2) - a7
    psubw m%8, m%7 ; a6 - (a5>>2)
    SWAP %2, %4, %3, %6, %8, %5
%endmacro
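
; For reference, the arithmetic above is the 1-D stage of the H.264 8x8
; transform, as in x264's scalar DCT8_1D (C-like sketch over one row or
; column src0..src7):
;   s07 = src0+src7   d07 = src0-src7   (likewise s16/d16, s25/d25, s34/d34)
;   a0 = s07+s34      a1 = s16+s25      a2 = s07-s34      a3 = s16-s25
;   a4 = d16+d25 + (d07 + (d07>>1))
;   a5 = d07-d34 - (d25 + (d25>>1))
;   a6 = d07+d34 - (d16 + (d16>>1))
;   a7 = d16-d25 + (d34 + (d34>>1))
;   dst0 = a0+a1          dst4 = a0-a1
;   dst1 = a4 + (a7>>2)   dst5 = a6 - (a5>>2)
;   dst2 = a2 + (a3>>1)   dst6 = (a2>>1) - a3
;   dst3 = a5 + (a6>>2)   dst7 = (a4>>2) - a7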
; in: 0,4 in mem, rest in regs
load_diff_4x8_mmx:
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    movq [r0], m0 ; save m0: the last LOAD_DIFF uses it as a temporary
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    movq m0, [r0]
    ret
dct8_mmx:
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    SAVE_MM_PERMUTATION dct8_mmx
    ret
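
; Each LOAD_DIFF row above computes pix1[x] - pix2[x] widened to int16, four
; differences per mmx register. SAVE_MM_PERMUTATION records the logical->
; physical register mapping left behind by the SWAPs inside DCT8_1D, so code
; following a call to dct8_mmx can keep using the logical names m0..m7.
; (A summary of the x86util conventions this file relies on.)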
%macro SPILL_SHUFFLE 3-* ; ptr, list of regs, list of memory offsets
    mova [%%base + %2*16], %%tmp
%macro UNSPILL_SHUFFLE 3-*
    mova %%tmp, [%%base + %2*16]
%macro SPILL 2+ ; assume offsets are the same as reg numbers
    SPILL_SHUFFLE %1, %2, %2
%endmacro

%macro UNSPILL 2+
    UNSPILL_SHUFFLE %1, %2, %2
%endmacro
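
; Usage sketch (illustrative values, not from the original source):
;   SPILL r0, 1,2               ; m1 -> [r0+0x10], m2 -> [r0+0x20]
;   UNSPILL r0, 1,2             ; and back again
;   SPILL_SHUFFLE r1, 0,1, 6,7  ; m0 -> [r1+0x60], m1 -> [r1+0x70]
; Slots are spaced 16 bytes apart (one dct[] row); SPILL/UNSPILL reuse the
; register number as the slot index, while the _SHUFFLE forms take an
; explicit list of slot indices.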
;-----------------------------------------------------------------------------
; void x264_sub8x8_dct8_mmx( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal x264_sub8x8_dct8_mmx, 3,3
global x264_sub8x8_dct8_mmx %+ .skip_prologue
.skip_prologue:
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 0,1,2,3,4
    TRANSPOSE4x4W 4,5,6,7,0
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 4,5,6,7,0
    TRANSPOSE4x4W 0,1,2,3,5
    SPILL_SHUFFLE r0, 0,1,2,3, 4,5,6,7
    movq mm4, m6 ; relies on the register permutation to avoid a conflict
    UNSPILL r0+8, 4,5,6,7
    SPILL r0+8, 1,2,3,5,7
    UNSPILL r0, 0,1,2,3,4,5,6,7
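
; mmx registers hold only four words each, so the 8x8 dct is built from two
; 4x8 halves: load_diff_4x8_mmx produces one half at a time, and the
; transpose between the two 1-D passes is assembled from four 4x4
; sub-transposes, with SPILL/UNSPILL shuffling the quadrants through the
; dct[] buffer.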
idct8_mmx:
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SAVE_MM_PERMUTATION idct8_mmx
    ret
%macro ADD_STORE_ROW 3
    movq m1, [r0+%1*FDEC_STRIDE]
    movq [r0+%1*FDEC_STRIDE], m1
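
; In outline, ADD_STORE_ROW adds one row of 16-bit residual (%2/%3 = left
; and right 4-pixel halves) to the 8 reconstructed pixels at
; r0+%1*FDEC_STRIDE: the loaded bytes are unpacked to words against a zeroed
; register, summed with %2/%3, and packed back to bytes with unsigned
; saturation (packuswb) before the store.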
;-----------------------------------------------------------------------------
; void x264_add8x8_idct8_mmx( uint8_t *dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal x264_add8x8_idct8_mmx, 2,2
global x264_add8x8_idct8_mmx %+ .skip_prologue
.skip_prologue:
    add word [r1], 32 ; bias the DC coefficient: +32 on every output after both passes
    UNSPILL r1, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0
    UNSPILL r1+8, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0
; memory layout at this time:
    UNSPILL_SHUFFLE r1, 1,2,3, 5,6,7
    ; the mmN comments record which physical register holds each logical mN
    ; under the current permutation
    movq [r1+0x08], m0 ; mm4
    movq [r1+0x48], m4 ; mm5
    movq [r1+0x58], m5 ; mm0
    movq [r1+0x68], m6 ; mm2
    movq [r1+0x78], m7 ; mm6
    movq [r1+0x18], m1 ; mm1
    movq [r1+0x28], m2 ; mm7
    movq [r1+0x38], m3 ; mm3
    ADD_STORE_ROW 0, [r1+0x00], [r1+0x08]
    ADD_STORE_ROW 1, [r1+0x10], [r1+0x18]
    ADD_STORE_ROW 2, [r1+0x20], [r1+0x28]
    ADD_STORE_ROW 3, m3, [r1+0x38]
    ADD_STORE_ROW 4, m4, [r1+0x48]
    ADD_STORE_ROW 5, m5, [r1+0x58]
    ADD_STORE_ROW 6, m6, [r1+0x68]
    ADD_STORE_ROW 7, m7, [r1+0x78]
    ret
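
; As with the forward transform, the mmx idct runs its two 1-D passes over
; 4x8 halves with 4x4 sub-transposes in between; the dct[] buffer doubles as
; scratch space, since 64 coefficients cannot stay in eight 64-bit registers
; at once.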
; in: m0..m7, except m6 which is in [%9+0x60]
; out: m0..m7, except m4 which is in [%9+0x40]
%macro TRANSPOSE8x8W 9
    SBUTTERFLY wd, %1, %2, %7
    movdqa [%9+0x10], m%2 ; stash %2 while it serves as a temporary
    movdqa m%7, [%9+0x60]
    SBUTTERFLY wd, %3, %4, %2
    SBUTTERFLY wd, %5, %6, %2
    SBUTTERFLY wd, %7, %8, %2
    SBUTTERFLY dq, %1, %3, %2
    movdqa [%9+0x30], m%3 ; stash %3, which is the temporary from here on
    movdqa m%2, [%9+0x10]
    SBUTTERFLY dq, %2, %4, %3
    SBUTTERFLY dq, %5, %7, %3
    SBUTTERFLY dq, %6, %8, %3
    SBUTTERFLY qdq, %1, %5, %3
    SBUTTERFLY qdq, %2, %6, %3
    movdqa [%9+0x40], m%2
    movdqa m%3, [%9+0x30]
    SBUTTERFLY qdq, %3, %7, %2
    SBUTTERFLY qdq, %4, %8, %2
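
; Same interleave scheme as TRANSPOSE4x4W, extended to 8x8 words with three
; SBUTTERFLY rounds at word, dword and qword granularity. A full 8x8 word
; transpose needs nine xmm registers (eight rows plus a temporary), one more
; than x86-32 offers, so one row is staged in the caller's buffer: m6 comes
; in from [%9+0x60] and m4 is left behind in [%9+0x40].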
;-----------------------------------------------------------------------------
; void x264_sub8x8_dct8_sse2( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal x264_sub8x8_dct8_sse2, 3,3
global x264_sub8x8_dct8_sse2 %+ .skip_prologue
.skip_prologue:
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    SPILL r0, 0 ; save m0: the last LOAD_DIFF uses it as a temporary
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    UNSPILL r0, 0
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    UNSPILL r0, 0,4 ; DCT8_1D left rows 0,4,6 in memory; the transpose only wants 6 there
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,r0
    UNSPILL r0, 4 ; the transpose leaves row 4 in memory
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    SPILL r0, 1,2,3,5,7 ; rows 0,4,6 were already stored by DCT8_1D
    ret
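
; Each DCT8_1D call transforms all eight columns at once (one row per xmm
; register, SIMD across the row), so DCT8_1D / TRANSPOSE8x8W / DCT8_1D
; amounts to the full separable 2-D transform of the difference block.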
;-----------------------------------------------------------------------------
; void x264_add8x8_idct8_sse2( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal x264_add8x8_idct8_sse2, 2,2
global x264_add8x8_idct8_sse2 %+ .skip_prologue
.skip_prologue:
    UNSPILL r1, 1,2,3,5,6,7
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SPILL r1, 6 ; the transpose expects m6 in [r1+0x60]
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,r1
    paddw m0, [pw_32 GLOBAL] ; rounding for the final >>6
    SPILL r1, 0 ; IDCT8_1D reads rows 0,4 from memory; row 4 is there already
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SPILL r1, 6,7 ; free m6/m7 to serve as STORE_DIFF temporaries
    pxor m7, m7 ; zero register for STORE_DIFF's byte unpack
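
; Adding pw_32 to m0 between the passes implements the final rounding: every
; output of the 1-D idct contains its row-0 input with weight +1, so biasing
; that row by 32 is equivalent to adding 32 to each result before the >>6
; inside STORE_DIFF.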
    STORE_DIFF m0, m6, m7, [r0+FDEC_STRIDE*0]
    STORE_DIFF m1, m6, m7, [r0+FDEC_STRIDE*1]
    STORE_DIFF m2, m6, m7, [r0+FDEC_STRIDE*2]
    STORE_DIFF m3, m6, m7, [r0+FDEC_STRIDE*3]
    STORE_DIFF m4, m6, m7, [r0+FDEC_STRIDE*4]
    STORE_DIFF m5, m6, m7, [r0+FDEC_STRIDE*5]
    UNSPILL_SHUFFLE r1, 0,1, 6,7 ; reload rows 6,7 into m0,m1
    STORE_DIFF m0, m6, m7, [r0+FDEC_STRIDE*6]
    STORE_DIFF m1, m6, m7, [r0+FDEC_STRIDE*7]
    ret