/*****************************************************************************
 * dct.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
 *          Eric Petit <titer@m0k.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/
#include "common/common.h"
#include "ppccommon.h"
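
/* 1-D 4-point H.264 forward transform butterfly, applied to a whole vector
 * (eight 16-bit lanes) at a time:
 *   b0 =   a0 +  a1 +   a2 +   a3
 *   b1 = 2*a0 +  a1 -   a2 - 2*a3
 *   b2 =   a0 -  a1 -   a2 +   a3
 *   b3 =   a0 - 2*a1 + 2*a2 -   a3 */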
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
    b1 = vec_add( a0, a3 );              \
    b3 = vec_add( a1, a2 );              \
    b0 = vec_add( b1, b3 );              \
    b2 = vec_sub( b1, b3 );              \
    a0 = vec_sub( a0, a3 );              \
    a1 = vec_sub( a1, a2 );              \
    b1 = vec_add( a0, a0 );              \
    b1 = vec_add( b1, a1 );              \
    b3 = vec_sub( a0, a1 );              \
    b3 = vec_sub( b3, a1 )
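
/* 4x4 forward transform of the residual pix1 - pix2: one 1-D pass, a
 * transpose, a second 1-D pass, then the relevant halves of the result
 * vectors are packed into dct[] through permHighv. */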
void x264_sub4x4_dct_altivec( int16_t dct[4][4],
                              uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;

    vec_u8_t permHighv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct3v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
                     dct0v, dct1v, dct2v, dct3v );
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );

    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,  (int16_t*)dct);
    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, (int16_t*)dct);
}
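
/* 8x8 residual handled as four 4x4 transforms: each input vector holds one
 * row of two horizontally adjacent 4x4 blocks, and permHighv/permLowv
 * regroup the transformed halves so the four 4x4 sub-blocks are stored
 * contiguously in dct[4][4][4]. */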
void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
                              uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;

    vec_u8_t permHighv, permLowv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    permLowv  = (vec_u8_t) CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );

    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,   (int16_t*)dct);
    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16,  (int16_t*)dct);
    vec_st(vec_perm(tmp4v, tmp5v, permHighv), 32,  (int16_t*)dct);
    vec_st(vec_perm(tmp6v, tmp7v, permHighv), 48,  (int16_t*)dct);
    vec_st(vec_perm(tmp0v, tmp1v, permLowv),  64,  (int16_t*)dct);
    vec_st(vec_perm(tmp2v, tmp3v, permLowv),  80,  (int16_t*)dct);
    vec_st(vec_perm(tmp4v, tmp5v, permLowv),  96,  (int16_t*)dct);
    vec_st(vec_perm(tmp6v, tmp7v, permLowv),  112, (int16_t*)dct);
}
void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
                                uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0], &pix2[0] );
    x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8], &pix2[8] );
    x264_sub8x8_dct_altivec( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct_altivec( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
/***************************************************************************
 * 8x8 transform:
 ***************************************************************************/
/* DCT8_1D unrolled by 8 in Altivec */
#define DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v ) \
{ \
    /* int s07 = SRC(0) + SRC(7); */ \
    vec_s16_t s07v = vec_add( dct0v, dct7v); \
    /* int s16 = SRC(1) + SRC(6); */ \
    vec_s16_t s16v = vec_add( dct1v, dct6v); \
    /* int s25 = SRC(2) + SRC(5); */ \
    vec_s16_t s25v = vec_add( dct2v, dct5v); \
    /* int s34 = SRC(3) + SRC(4); */ \
    vec_s16_t s34v = vec_add( dct3v, dct4v); \
    \
    /* int a0 = s07 + s34; */ \
    vec_s16_t a0v = vec_add(s07v, s34v); \
    /* int a1 = s16 + s25; */ \
    vec_s16_t a1v = vec_add(s16v, s25v); \
    /* int a2 = s07 - s34; */ \
    vec_s16_t a2v = vec_sub(s07v, s34v); \
    /* int a3 = s16 - s25; */ \
    vec_s16_t a3v = vec_sub(s16v, s25v); \
    \
    /* int d07 = SRC(0) - SRC(7); */ \
    vec_s16_t d07v = vec_sub( dct0v, dct7v); \
    /* int d16 = SRC(1) - SRC(6); */ \
    vec_s16_t d16v = vec_sub( dct1v, dct6v); \
    /* int d25 = SRC(2) - SRC(5); */ \
    vec_s16_t d25v = vec_sub( dct2v, dct5v); \
    /* int d34 = SRC(3) - SRC(4); */ \
    vec_s16_t d34v = vec_sub( dct3v, dct4v); \
    \
    /* int a4 = d16 + d25 + (d07 + (d07>>1)); */ \
    vec_s16_t a4v = vec_add( vec_add(d16v, d25v), vec_add(d07v, vec_sra(d07v, onev)) ); \
    /* int a5 = d07 - d34 - (d25 + (d25>>1)); */ \
    vec_s16_t a5v = vec_sub( vec_sub(d07v, d34v), vec_add(d25v, vec_sra(d25v, onev)) ); \
    /* int a6 = d07 + d34 - (d16 + (d16>>1)); */ \
    vec_s16_t a6v = vec_sub( vec_add(d07v, d34v), vec_add(d16v, vec_sra(d16v, onev)) ); \
    /* int a7 = d16 - d25 + (d34 + (d34>>1)); */ \
    vec_s16_t a7v = vec_add( vec_sub(d16v, d25v), vec_add(d34v, vec_sra(d34v, onev)) ); \
    \
    /* DST(0) = a0 + a1; */ \
    dct0v = vec_add( a0v, a1v ); \
    /* DST(1) = a4 + (a7>>2); */ \
    dct1v = vec_add( a4v, vec_sra(a7v, twov) ); \
    /* DST(2) = a2 + (a3>>1); */ \
    dct2v = vec_add( a2v, vec_sra(a3v, onev) ); \
    /* DST(3) = a5 + (a6>>2); */ \
    dct3v = vec_add( a5v, vec_sra(a6v, twov) ); \
    /* DST(4) = a0 - a1; */ \
    dct4v = vec_sub( a0v, a1v ); \
    /* DST(5) = a6 - (a5>>2); */ \
    dct5v = vec_sub( a6v, vec_sra(a5v, twov) ); \
    /* DST(6) = (a2>>1) - a3; */ \
    dct6v = vec_sub( vec_sra(a2v, onev), a3v ); \
    /* DST(7) = (a4>>2) - a7; */ \
    dct7v = vec_sub( vec_sra(a4v, twov), a7v ); \
}
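
/* Full 2-D 8x8 forward transform of the residual pix1 - pix2: one 1-D pass,
 * a transpose, a second 1-D pass, then the 64 coefficients are stored. */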
void x264_sub8x8_dct8_altivec( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_add( onev, onev );

    PREP_DIFF_8BYTEALIGNED;

    vec_s16_t dct0v, dct1v, dct2v, dct3v,
              dct4v, dct5v, dct6v, dct7v;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    vec_s16_t dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
              dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v;

    VEC_TRANSPOSE_8( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v,
                     dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    vec_st( dct_tr0v,   0, (signed short *)dct );
    vec_st( dct_tr1v,  16, (signed short *)dct );
    vec_st( dct_tr2v,  32, (signed short *)dct );
    vec_st( dct_tr3v,  48, (signed short *)dct );

    vec_st( dct_tr4v,  64, (signed short *)dct );
    vec_st( dct_tr5v,  80, (signed short *)dct );
    vec_st( dct_tr6v,  96, (signed short *)dct );
    vec_st( dct_tr7v, 112, (signed short *)dct );
}
void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct8_altivec( dct[0], &pix1[0], &pix2[0] );
    x264_sub8x8_dct8_altivec( dct[1], &pix1[8], &pix2[8] );
    x264_sub8x8_dct8_altivec( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct8_altivec( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
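
/* 1-D 4-point H.264 inverse transform butterfly, eight 16-bit lanes per call. */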
#define IDCT_1D_ALTIVEC(s0, s1, s2, s3, d0, d1, d2, d3) \
{ \
    /* a0 = SRC(0) + SRC(2); */ \
    vec_s16_t a0v = vec_add(s0, s2); \
    /* a1 = SRC(0) - SRC(2); */ \
    vec_s16_t a1v = vec_sub(s0, s2); \
    /* a2 = (SRC(1)>>1) - SRC(3); */ \
    vec_s16_t a2v = vec_sub(vec_sra(s1, onev), s3); \
    /* a3 = (SRC(3)>>1) + SRC(1); */ \
    vec_s16_t a3v = vec_add(vec_sra(s3, onev), s1); \
    /* DST(0, a0 + a3); */ \
    d0 = vec_add(a0v, a3v); \
    /* DST(1, a1 + a2); */ \
    d1 = vec_add(a1v, a2v); \
    /* DST(2, a1 - a2); */ \
    d2 = vec_sub(a1v, a2v); \
    /* DST(3, a0 - a3); */ \
    d3 = vec_sub(a0v, a3v); \
}
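
/* Read-modify-write of four reconstructed pixels: load them from dst, add the
 * 16-bit residual in va, pack back to 8-bit pixels and store one 32-bit word.
 * The temporaries vdst_orig, vdst, vdst_ss, va_u8 and va_u32, plus vdst_mask,
 * element and dst, must be declared by the caller. */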
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16_t)vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_s16_to_u8(va);                       \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0);         \
    vec_ste(va_u32, element, (uint32_t*)dst);
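
/* Scale the IDCT result down by 6 bits, add it to four pixels loaded from
 * dest, clip to 8 bits and write the 4-byte result back with a single
 * vec_ste. */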
#define ALTIVEC_STORE4_SUM_CLIP(dest, idctv, perm_ldv) \
{ \
    /* unaligned load */ \
    vec_u8_t lv = vec_ld(0, dest); \
    vec_u8_t dstv = vec_perm(lv, zero_u8v, (vec_u8_t)perm_ldv); \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
    vec_u8_t idstsum8 = vec_s16_to_u8(idstsum); \
    /* unaligned store */ \
    vec_u32_t bodyv = vec_splat((vec_u32_t)idstsum8, 0); \
    int element = ((unsigned long)dest & 0xf) >> 2; \
    vec_ste(bodyv, element, (uint32_t *)dest); \
}
void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[4][4] )
{
    vec_u16_t onev = vec_splat_u16(1);

    dct[0][0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3;

    s0 = vec_ld( 0x00, (int16_t*)dct );
    s1 = vec_sld( s0, s0, 8 );
    s2 = vec_ld( 0x10, (int16_t*)dct );
    s3 = vec_sld( s2, s2, 8 );

    vec_s16_t d0, d1, d2, d3;
    IDCT_1D_ALTIVEC( s0, s1, s2, s3, d0, d1, d2, d3 );

    vec_s16_t tr0, tr1, tr2, tr3;

    VEC_TRANSPOSE_4( d0, d1, d2, d3, tr0, tr1, tr2, tr3 );

    vec_s16_t idct0, idct1, idct2, idct3;
    IDCT_1D_ALTIVEC( tr0, tr1, tr2, tr3, idct0, idct1, idct2, idct3 );

    vec_u8_t perm_ldv = vec_lvsl( 0, dst );
    vec_u16_t sixv = vec_splat_u16(6);

    ALTIVEC_STORE4_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3, perm_ldv );
}
void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][4][4] )
{
    x264_add4x4_idct_altivec( &p_dst[0], dct[0] );
    x264_add4x4_idct_altivec( &p_dst[4], dct[1] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}
void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][4][4] )
{
    x264_add8x8_idct_altivec( &p_dst[0], &dct[0] );
    x264_add8x8_idct_altivec( &p_dst[8], &dct[4] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+0], &dct[8] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
}
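
/* 1-D 8-point H.264 inverse transform: even/odd butterflies computed on
 * eight coefficient vectors, eight 16-bit lanes per call. */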
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) \
{ \
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = SRC(5) + SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}
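
/* Scale the IDCT result down by 6 bits, add it to eight pixels loaded from a
 * possibly unaligned dest, clip to 8 bits and write the row back with a
 * read-modify-write unaligned store. */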
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) \
{ \
    /* unaligned load */ \
    vec_u8_t hv = vec_ld( 0, dest ); \
    vec_u8_t lv = vec_ld( 7, dest ); \
    vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
    vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum); \
    /* unaligned store */ \
    vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
    lv = vec_sel( lv, bodyv, edgelv ); \
    vec_st( lv, 7, dest ); \
    hv = vec_ld( 0, dest ); \
    vec_u8_t edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
    hv = vec_sel( hv, bodyv, edgehv ); \
    vec_st( hv, 0, dest ); \
}
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    dct[0][0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);

    vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;

    VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
                     tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7);

    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
    IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);
    vec_u16_t sixv = vec_splat_u16(6);
    const vec_u8_t sel = (vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*FDEC_STRIDE], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*FDEC_STRIDE], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*FDEC_STRIDE], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*FDEC_STRIDE], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*FDEC_STRIDE], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*FDEC_STRIDE], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*FDEC_STRIDE], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
}
void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] )
{
    x264_add8x8_idct8_altivec( &dst[0], dct[0] );
    x264_add8x8_idct8_altivec( &dst[8], dct[1] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}
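
/* Frame (progressive) zigzag scan of one 4x4 block: sel0/sel1 are byte-index
 * tables that pick the 16 coefficients out of the two input vectors in scan
 * order. */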
void x264_zigzag_scan_4x4_frame_altivec( int16_t level[16], int16_t dct[4][4] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, (int16_t*)dct);
    dct1v = vec_ld(0x10, (int16_t*)dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,8,9,2,3,4,5,10,11,16,17,24,25,18,19);
    const vec_u8_t sel1 = (vec_u8_t) CV(12,13,6,7,14,15,20,21,26,27,28,29,22,23,30,31);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = vec_perm( dct0v, dct1v, sel1 );

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}
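
/* Field (interlaced) scan of one 4x4 block: sel0 reorders the first eight
 * coefficients; the remaining eight are already in scan order. */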
void x264_zigzag_scan_4x4_field_altivec( int16_t level[16], int16_t dct[4][4] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, (int16_t*)dct);
    dct1v = vec_ld(0x10, (int16_t*)dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,2,3,8,9,4,5,6,7,10,11,12,13,14,15);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = dct1v; /* the last eight coefficients keep their order */

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}