;*****************************************************************************
;* dct-32.asm: x86_32 transform and zigzag
;*****************************************************************************
;* Copyright (C) 2003-2011 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Holger Lubitz <holger@lubitz.org>
;*          Laurent Aimar <fenrir@via.ecp.fr>
;*          Min Chen <chenm001.163.com>
;*          Christian Heine <sennindemokrit@gmx.net>
;*
;* This program is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* This program is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License
;* along with this program; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
;*
;* This program is also available under a commercial proprietary license.
;* For more information, contact us at licensing@x264.com.
;*****************************************************************************
%include "x86inc.asm"
%include "x86util.asm"

SECTION .text

%ifndef HIGH_BIT_DEPTH
cextern pw_32
cextern hsub_mul
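
; DCT8_1D below is the 8-point stage of the H.264 high-profile forward
; transform.  As a rough scalar model (one packed word lane, matching the
; macro's inline comments):
;     s07 = x0+x7   s16 = x1+x6   s25 = x2+x5   s34 = x3+x4
;     d07 = x0-x7   d16 = x1-x6   d25 = x2-x5   d34 = x3-x4
;     a0 = s07+s34  a1 = s16+s25  a2 = s07-s34  a3 = s16-s25
;     a4 = d16+d25+(d07+(d07>>1))     a5 = d07-d34-(d25+(d25>>1))
;     a6 = d07+d34-(d16+(d16>>1))     a7 = d16-d25+(d34+(d34>>1))
;     dst0 = a0+a1          dst4 = a0-a1
;     dst2 = a2+(a3>>1)     dst6 = (a2>>1)-a3
;     dst1 = a4+(a7>>2)     dst3 = a5+(a6>>2)
;     dst5 = a6-(a5>>2)     dst7 = (a4>>2)-a7
; SUMSUB_BA (from x86util.asm) forms a sum/difference pair in place; e.g.
; SUMSUB_BA w, 7, 0 expands to:
;     paddw m7, m0        ; m7 = x7+x0
;     paddw m0, m0
;     psubw m0, m7        ; m0 = x0-x7
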
; in: m0..m7
; out: 0,4,6 in mem, rest in regs
%macro DCT8_1D 9
    SUMSUB_BA w, %8, %1      ; %8 = s07,  %1 = d07
    SUMSUB_BA w, %7, %2      ; %7 = s16,  %2 = d16
    SUMSUB_BA w, %6, %3      ; %6 = s25,  %3 = d25
    SUMSUB_BA w, %5, %4      ; %5 = s34,  %4 = d34
    SUMSUB_BA w, %5, %8      ; %5 = a0,   %8 = a2
    SUMSUB_BA w, %6, %7      ; %6 = a1,   %7 = a3
    SUMSUB_BA w, %6, %5      ; %6 = dst0, %5 = dst4
    mova  [%9+0x00], m%6
    mova  [%9+0x40], m%5
    mova  m%6, m%7           ; a3
    psraw m%6, 1             ; a3>>1
    paddw m%6, m%8           ; a2 + (a3>>1)
    psraw m%8, 1             ; a2>>1
    psubw m%8, m%7           ; (a2>>1) - a3
    mova  [%9+0x60], m%8
    mova  m%5, m%3
    psraw m%5, 1
    paddw m%5, m%3           ; d25+(d25>>1)
    mova  m%7, m%1
    psubw m%7, m%4           ; a5 = d07-d34-(d25+(d25>>1))
    psubw m%7, m%5
    mova  m%5, m%2
    psraw m%5, 1
    paddw m%5, m%2           ; d16+(d16>>1)
    mova  m%8, m%1
    paddw m%8, m%4
    psubw m%8, m%5           ; a6 = d07+d34-(d16+(d16>>1))
    mova  m%5, m%1
    psraw m%5, 1
    paddw m%5, m%1           ; d07+(d07>>1)
    paddw m%5, m%2
    paddw m%5, m%3           ; a4 = d16+d25+(d07+(d07>>1))
    mova  m%1, m%4
    psraw m%1, 1
    paddw m%1, m%4           ; d34+(d34>>1)
    paddw m%1, m%2
    psubw m%1, m%3           ; a7 = d16-d25+(d34+(d34>>1))
    mova  m%4, m%1
    psraw m%4, 2
    paddw m%4, m%5           ; a4 + (a7>>2)
    mova  m%3, m%8
    psraw m%3, 2
    paddw m%3, m%7           ; a5 + (a6>>2)
    psraw m%5, 2
    psraw m%7, 2
    psubw m%5, m%1           ; (a4>>2) - a7
    psubw m%8, m%7           ; a6 - (a5>>2)
    SWAP %2, %4, %3, %6, %8, %5
%endmacro

; in: 0,4 in mem, rest in regs
    SUMSUB_BA w, %6, %1
    SUMSUB_BA w, %7, %6
    SUMSUB_BA w, %3, %1
    SUMSUB_BA w, %5, %7
    SUMSUB_BA w, %2, %3
    SUMSUB_BA w, %8, %1
    SUMSUB_BA w, %4, %6
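
; The scalar shape of the 8-point inverse stage is the standard H.264 8x8
; IDCT butterfly (a rough sketch, not a line-for-line model of the macro):
;     even half e0..e3 from x0,x2,x4,x6, odd half o0..o3 from x1,x3,x5,x7
;     y[k] = e[k]+o[k]  and  y[7-k] = e[k]-o[k]   for k = 0..3
; Each closing SUMSUB_BA above forms one such sum/difference pair in place.
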
; in: r0 = scratch/dct, r1 = fenc (FENC_STRIDE), r2 = fdec (FDEC_STRIDE)
load_diff_4x8_mmx:
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+4*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+5*FDEC_STRIDE]
    movq  [r0], m0           ; spill row 0: the last LOAD_DIFF needs m0 as temp
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+6*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+7*FDEC_STRIDE]
    movq  m0, [r0]           ; reload row 0
    ret

dct8_mmx:
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    SAVE_MM_PERMUTATION dct8_mmx
    ret
%macro SPILL_SHUFFLE 3-* ; ptr, list of regs, list of memory offsets
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova [%%base + %2*16], %%tmp
    %rotate 1-%0/2
    %endrep
%endmacro

%macro UNSPILL_SHUFFLE 3-*
    %xdefine %%base %1
    %rep %0/2
    %xdefine %%tmp m%2
    %rotate %0/2
    mova %%tmp, [%%base + %2*16]
    %rotate 1-%0/2
    %endrep
%endmacro

%macro SPILL 2+ ; assume offsets are the same as reg numbers
    SPILL_SHUFFLE %1, %2, %2
%endmacro

%macro UNSPILL 2+
    UNSPILL_SHUFFLE %1, %2, %2
%endmacro
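
; e.g. (a hypothetical use) SPILL_SHUFFLE r0, 1,3, 5,7 expands to
;     mova [r0+5*16], m1
;     mova [r0+7*16], m3
; i.e. each listed register is stored at the matching listed row offset;
; rows of the 8x8 coefficient buffer are 16 bytes apart.
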
;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8_mmx, 3,3
global sub8x8_dct8_mmx.skip_prologue
.skip_prologue:
    INIT_MMX
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 0,1,2,3,4
    TRANSPOSE4x4W 4,5,6,7,0
    call load_diff_4x8_mmx
    TRANSPOSE4x4W 4,5,6,7,0
    TRANSPOSE4x4W 0,1,2,3,5
    SPILL_SHUFFLE r0, 0,1,2,3, 4,5,6,7
    movq mm4, m6 ; depends on the permutation to not produce conflicts
    UNSPILL r0+8, 4,5,6,7
    SPILL r0+8, 1,2,3,5,7
    UNSPILL r0, 0,1,2,3,4,5,6,7
    ret

idct8_mmx:
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SAVE_MM_PERMUTATION idct8_mmx
    ret

; in: m0 == 0 (set by the caller); adds dct row %1 (%2 = left half,
; %3 = right half) to the eight pixels of the corresponding fdec row
%macro ADD_STORE_ROW 3
    movq  m1, [r0+%1*FDEC_STRIDE]
    movq  m2, m1
    punpcklbw m1, m0
    punpckhbw m2, m0
    paddw m1, %2
    paddw m2, %3
    packuswb m1, m2
    movq  [r0+%1*FDEC_STRIDE], m1
%endmacro
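
; e.g. ADD_STORE_ROW 0, [r1+0x00], [r1+0x08] adds the left and right halves
; of dct row 0 (2x4 int16 coefficients) to the eight pixels of fdec row 0,
; saturating to [0,255] via packuswb.
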
;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal add8x8_idct8_mmx, 2,2
global add8x8_idct8_mmx.skip_prologue
.skip_prologue:
    INIT_MMX
    add word [r1], 32           ; fold the +32 idct rounding bias into the DC
    UNSPILL r1, 1,2,3,5,6,7     ; 0 and 4 stay in memory (see IDCT8_1D)
    call idct8_mmx
    SPILL r1, 7
    TRANSPOSE4x4W 0,1,2,3,7
    SPILL r1, 0,1,2,3
    UNSPILL r1, 7
    TRANSPOSE4x4W 4,5,6,7,0
    UNSPILL r1+8, 1,2,3,5,6,7
    TRANSPOSE4x4W 0,1,2,3,7
    TRANSPOSE4x4W 4,5,6,7,0
    ; memory layout at this time:
    UNSPILL_SHUFFLE r1, 1,2,3, 5,6,7
    movq [r1+0x08], m0 ; mm4
    movq [r1+0x48], m4 ; mm5
    movq [r1+0x58], m5 ; mm0
    movq [r1+0x68], m6 ; mm2
    movq [r1+0x78], m7 ; mm6
    movq [r1+0x18], m1 ; mm1
    movq [r1+0x28], m2 ; mm7
    movq [r1+0x38], m3 ; mm3
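
    ; The "; mmN" notes record which physical register each logical mN maps
    ; to at this point: x86inc renames registers on SWAP, and
    ; SAVE_MM_PERMUTATION makes that renaming part of the contract between
    ; these internal routines and their callers.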
    ADD_STORE_ROW 0, [r1+0x00], [r1+0x08]
    ADD_STORE_ROW 1, [r1+0x10], [r1+0x18]
    ADD_STORE_ROW 2, [r1+0x20], [r1+0x28]
    ADD_STORE_ROW 3, m3, [r1+0x38]
    ADD_STORE_ROW 4, m4, [r1+0x48]
    ADD_STORE_ROW 5, m5, [r1+0x58]
    ADD_STORE_ROW 6, m6, [r1+0x68]
    ADD_STORE_ROW 7, m7, [r1+0x78]
    ret

INIT_XMM
%macro DCT_SUB8 1
cglobal sub8x8_dct_%1, 3,3
    add r2, 4*FDEC_STRIDE
global sub8x8_dct_%1.skip_prologue
.skip_prologue:
%ifnidn %1, sse2
    mova m7, [hsub_mul]         ; pmaddubsw constant for the ssse3 loads
%endif
    LOAD_DIFF8x4 0, 1, 2, 3, 6, 7, r1, r2-4*FDEC_STRIDE
    SPILL r0, 1,2               ; the next load uses 1,2 as temps
    SWAP 2, 7                   ; keep the hsub_mul constant in a temp reg
    LOAD_DIFF8x4 4, 5, 6, 7, 1, 2, r1, r2-4*FDEC_STRIDE
    UNSPILL r0, 1,2
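
    ; Scalar model of one DCT4_1D butterfly (the 4-point H.264 core
    ; transform), applied to every packed word lane:
    ;     s03 = x0+x3   s12 = x1+x2   d03 = x0-x3   d12 = x1-x2
    ;     y0 = s03+s12        y1 = 2*d03+d12
    ;     y2 = s03-s12        y3 = d03-2*d12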
    DCT4_1D 0, 1, 2, 3, 7       ; 1st 1-D pass on rows 0-3
    TRANSPOSE2x4x4W 0, 1, 2, 3, 7
    DCT4_1D 4, 5, 6, 7, 2       ; 1st 1-D pass on rows 4-7
    TRANSPOSE2x4x4W 4, 5, 6, 7, 2
    DCT4_1D 0, 1, 2, 3, 6       ; 2nd 1-D pass
    STORE_DCT 0, 1, 2, 3, r0, 0
    DCT4_1D 4, 5, 6, 7, 3
    STORE_DCT 4, 5, 6, 7, r0, 64
    ret

;-----------------------------------------------------------------------------
; void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
;-----------------------------------------------------------------------------
cglobal sub8x8_dct8_%1, 3,3
    add r2, 4*FDEC_STRIDE
global sub8x8_dct8_%1.skip_prologue
.skip_prologue:
%ifidn %1, sse2
    LOAD_DIFF m0, m7, none, [r1+0*FENC_STRIDE], [r2-4*FDEC_STRIDE]
    LOAD_DIFF m1, m7, none, [r1+1*FENC_STRIDE], [r2-3*FDEC_STRIDE]
    LOAD_DIFF m2, m7, none, [r1+2*FENC_STRIDE], [r2-2*FDEC_STRIDE]
    LOAD_DIFF m3, m7, none, [r1+3*FENC_STRIDE], [r2-1*FDEC_STRIDE]
    LOAD_DIFF m4, m7, none, [r1+4*FENC_STRIDE], [r2+0*FDEC_STRIDE]
    LOAD_DIFF m5, m7, none, [r1+5*FENC_STRIDE], [r2+1*FDEC_STRIDE]
    SPILL r0, 0                 ; the last LOAD_DIFF uses m0 as its temp
    LOAD_DIFF m6, m7, none, [r1+6*FENC_STRIDE], [r2+2*FDEC_STRIDE]
    LOAD_DIFF m7, m0, none, [r1+7*FENC_STRIDE], [r2+3*FDEC_STRIDE]
    UNSPILL r0, 0
%else
    mova m7, [hsub_mul]
    LOAD_DIFF8x4 0, 1, 2, 3, 4, 7, r1, r2-4*FDEC_STRIDE
    SPILL r0, 0,1
    SWAP 1, 7
    LOAD_DIFF8x4 4, 5, 6, 7, 0, 1, r1, r2-4*FDEC_STRIDE
    UNSPILL r0, 0,1
%endif
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    UNSPILL r0, 0
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,[r0+0x60],[r0+0x40],1
    UNSPILL r0, 4
    DCT8_1D 0,1,2,3,4,5,6,7,r0
    SPILL r0, 1,2,3,5,7
    ret
%endmacro

%define LOAD_DIFF8x4 LOAD_DIFF8x4_SSE2
%define movdqa movaps       ; movaps/movlhps: same moves, one byte shorter
%define punpcklqdq movlhps
DCT_SUB8 sse2
%undef movdqa
%undef punpcklqdq
%define LOAD_DIFF8x4 LOAD_DIFF8x4_SSSE3
DCT_SUB8 ssse3

;-----------------------------------------------------------------------------
; void add8x8_idct( uint8_t *pix, int16_t dct[4][4][4] )
;-----------------------------------------------------------------------------
cglobal add8x8_idct_sse2, 2,2
    add r0, 4*FDEC_STRIDE
global add8x8_idct_sse2.skip_prologue
.skip_prologue:
    UNSPILL_SHUFFLE r1, 0,2,1,3, 0,1,2,3
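    ; SBUTTERFLY qdq, a, b, t (from x86util.asm) is the interleave primitive:
    ;     mova       m't', m'a'
    ;     punpcklqdq m'a', m'b'
    ;     punpckhqdq m't', m'b'
    ;     SWAP b, t
    ; afterwards 'a' holds the low quadwords and 'b' the high quadwords.
    ; The shuffled load order above (0,2,1,3) pairs the rows so the
    ; butterflies line the 4x4 sub-blocks up for IDCT4_1D.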
    SBUTTERFLY qdq, 0, 1, 4
    SBUTTERFLY qdq, 2, 3, 4
    UNSPILL_SHUFFLE r1, 4,6,5,7, 4,5,6,7
    SPILL r1, 0                 ; 0 is live, but the next butterflies need a temp
    SBUTTERFLY qdq, 4, 5, 0
    SBUTTERFLY qdq, 6, 7, 0
    UNSPILL r1, 0
    IDCT4_1D w,0,1,2,3,r1
    SPILL r1, 4
    TRANSPOSE2x4x4W 0,1,2,3,4
    UNSPILL r1, 4
    IDCT4_1D w,4,5,6,7,r1
    SPILL r1, 0
    TRANSPOSE2x4x4W 4,5,6,7,0
    UNSPILL r1, 0
    paddw m0, [pw_32]           ; +32 rounding bias, folded into the 2nd pass
    IDCT4_1D w,0,1,2,3,r1
    paddw m4, [pw_32]
    IDCT4_1D w,4,5,6,7,r1
    SPILL r1, 6,7
    pxor m7, m7                 ; DIFFx2 needs a zero register for unpacking
    DIFFx2 m0, m1, m6, m7, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]; m5
    DIFFx2 m2, m3, m6, m7, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]; m5
    UNSPILL_SHUFFLE r1, 0,2, 6,7
    DIFFx2 m4, m5, m6, m7, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]; m5
    DIFFx2 m0, m2, m6, m7, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]; m5
    STORE_IDCT m1, m3, m5, m2
    ret

;-----------------------------------------------------------------------------
; void add8x8_idct8( uint8_t *p_dst, int16_t dct[8][8] )
;-----------------------------------------------------------------------------
cglobal add8x8_idct8_sse2, 2,2
    add r0, 4*FDEC_STRIDE
global add8x8_idct8_sse2.skip_prologue
.skip_prologue:
    UNSPILL r1, 1,2,3,5,6,7     ; 0 and 4 stay in memory (see IDCT8_1D)
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    TRANSPOSE8x8W 0,1,2,3,4,5,6,7,[r1+0x60],[r1+0x40],1
    IDCT8_1D 0,1,2,3,4,5,6,7,r1
    SPILL r1, 6,7
    pxor m7, m7
    DIFFx2 m0, m1, m6, m7, [r0-4*FDEC_STRIDE], [r0-3*FDEC_STRIDE]; m5
    DIFFx2 m2, m3, m6, m7, [r0-2*FDEC_STRIDE], [r0-1*FDEC_STRIDE]; m5
    UNSPILL_SHUFFLE r1, 0,2, 6,7
    DIFFx2 m4, m5, m6, m7, [r0+0*FDEC_STRIDE], [r0+1*FDEC_STRIDE]; m5
    DIFFx2 m0, m2, m6, m7, [r0+2*FDEC_STRIDE], [r0+3*FDEC_STRIDE]; m5
    STORE_IDCT m1, m3, m5, m2
    ret
%endif ; !HIGH_BIT_DEPTH