/*****************************************************************************
 * dct.c: ppc transform and zigzag
 *****************************************************************************
 * Copyright (C) 2003-2013 x264 project
 *
 * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
 *          Eric Petit <eric.petit@lapsus.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common/common.h"
#include "ppccommon.h"

#if !HIGH_BIT_DEPTH
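/* One 1-D pass of the 4x4 forward transform butterfly (weights 1 and 2);
 * it is applied once per dimension, with a transpose in between. */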
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
    b1 = vec_add( a0, a3 );              \
    b3 = vec_add( a1, a2 );              \
    b0 = vec_add( b1, b3 );              \
    b2 = vec_sub( b1, b3 );              \
    a0 = vec_sub( a0, a3 );              \
    a1 = vec_sub( a1, a2 );              \
    b1 = vec_add( a0, a0 );              \
    b1 = vec_add( b1, a1 );              \
    b3 = vec_sub( a0, a1 );              \
    b3 = vec_sub( b3, a1 )
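/* Compute the 4x4 residual pix1-pix2, transform rows and columns,
 * and write the 16 coefficients with two aligned 16-byte stores. */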
void x264_sub4x4_dct_altivec( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;

    vec_u8_t permHighv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct3v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
                     dct0v, dct1v, dct2v, dct3v );

    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );

    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,  dct);
    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, dct);
}
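/* An 8x8 block is four 4x4 transforms: the 8-wide difference vectors carry
 * two adjacent 4x4 blocks per VEC_DCT pass, and the final high/low permutes
 * split the coefficients into the four 4x4 blocks of dct[4][16]. */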
void x264_sub8x8_dct_altivec( int16_t dct[4][16], uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;

    vec_u8_t permHighv, permLowv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    permLowv  = (vec_u8_t) CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );

    vec_st(vec_perm(tmp0v, tmp1v, permHighv),   0, *dct);
    vec_st(vec_perm(tmp2v, tmp3v, permHighv),  16, *dct);
    vec_st(vec_perm(tmp4v, tmp5v, permHighv),  32, *dct);
    vec_st(vec_perm(tmp6v, tmp7v, permHighv),  48, *dct);
    vec_st(vec_perm(tmp0v, tmp1v, permLowv),   64, *dct);
    vec_st(vec_perm(tmp2v, tmp3v, permLowv),   80, *dct);
    vec_st(vec_perm(tmp4v, tmp5v, permLowv),   96, *dct);
    vec_st(vec_perm(tmp6v, tmp7v, permLowv),  112, *dct);
}
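/* 16x16 is handled as the four 8x8 quadrants. */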
void x264_sub16x16_dct_altivec( int16_t dct[16][16], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0], &pix2[0] );
    x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8], &pix2[8] );
    x264_sub8x8_dct_altivec( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct_altivec( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
/***************************************************************************
 * 8x8 transform:
 ***************************************************************************/

/* DCT8_1D unrolled by 8 in Altivec */
#define DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v ) \
{ \
    /* int s07 = SRC(0) + SRC(7); */ \
    vec_s16_t s07v = vec_add( dct0v, dct7v); \
    /* int s16 = SRC(1) + SRC(6); */ \
    vec_s16_t s16v = vec_add( dct1v, dct6v); \
    /* int s25 = SRC(2) + SRC(5); */ \
    vec_s16_t s25v = vec_add( dct2v, dct5v); \
    /* int s34 = SRC(3) + SRC(4); */ \
    vec_s16_t s34v = vec_add( dct3v, dct4v); \
    \
    /* int a0 = s07 + s34; */ \
    vec_s16_t a0v = vec_add(s07v, s34v); \
    /* int a1 = s16 + s25; */ \
    vec_s16_t a1v = vec_add(s16v, s25v); \
    /* int a2 = s07 - s34; */ \
    vec_s16_t a2v = vec_sub(s07v, s34v); \
    /* int a3 = s16 - s25; */ \
    vec_s16_t a3v = vec_sub(s16v, s25v); \
    \
    /* int d07 = SRC(0) - SRC(7); */ \
    vec_s16_t d07v = vec_sub( dct0v, dct7v); \
    /* int d16 = SRC(1) - SRC(6); */ \
    vec_s16_t d16v = vec_sub( dct1v, dct6v); \
    /* int d25 = SRC(2) - SRC(5); */ \
    vec_s16_t d25v = vec_sub( dct2v, dct5v); \
    /* int d34 = SRC(3) - SRC(4); */ \
    vec_s16_t d34v = vec_sub( dct3v, dct4v); \
    \
    /* int a4 = d16 + d25 + (d07 + (d07>>1)); */ \
    vec_s16_t a4v = vec_add( vec_add(d16v, d25v), vec_add(d07v, vec_sra(d07v, onev)) ); \
    /* int a5 = d07 - d34 - (d25 + (d25>>1)); */ \
    vec_s16_t a5v = vec_sub( vec_sub(d07v, d34v), vec_add(d25v, vec_sra(d25v, onev)) ); \
    /* int a6 = d07 + d34 - (d16 + (d16>>1)); */ \
    vec_s16_t a6v = vec_sub( vec_add(d07v, d34v), vec_add(d16v, vec_sra(d16v, onev)) ); \
    /* int a7 = d16 - d25 + (d34 + (d34>>1)); */ \
    vec_s16_t a7v = vec_add( vec_sub(d16v, d25v), vec_add(d34v, vec_sra(d34v, onev)) ); \
    \
    /* DST(0) = a0 + a1; */ \
    dct0v = vec_add( a0v, a1v ); \
    /* DST(1) = a4 + (a7>>2); */ \
    dct1v = vec_add( a4v, vec_sra(a7v, twov) ); \
    /* DST(2) = a2 + (a3>>1); */ \
    dct2v = vec_add( a2v, vec_sra(a3v, onev) ); \
    /* DST(3) = a5 + (a6>>2); */ \
    dct3v = vec_add( a5v, vec_sra(a6v, twov) ); \
    /* DST(4) = a0 - a1; */ \
    dct4v = vec_sub( a0v, a1v ); \
    /* DST(5) = a6 - (a5>>2); */ \
    dct5v = vec_sub( a6v, vec_sra(a5v, twov) ); \
    /* DST(6) = (a2>>1) - a3 ; */ \
    dct6v = vec_sub( vec_sra(a2v, onev), a3v ); \
    /* DST(7) = (a4>>2) - a7 ; */ \
    dct7v = vec_sub( vec_sra(a4v, twov), a7v ); \
}
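/* Full 8x8 transform of the residual: DCT8 over one dimension, transpose,
 * DCT8 over the other, then eight aligned stores of the 64 coefficients. */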
void x264_sub8x8_dct8_altivec( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_add( onev, onev );

    PREP_DIFF_8BYTEALIGNED;

    vec_s16_t dct0v, dct1v, dct2v, dct3v,
              dct4v, dct5v, dct6v, dct7v;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    vec_s16_t dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
              dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v;

    VEC_TRANSPOSE_8( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v,
                     dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    vec_st( dct_tr0v,   0, dct );
    vec_st( dct_tr1v,  16, dct );
    vec_st( dct_tr2v,  32, dct );
    vec_st( dct_tr3v,  48, dct );

    vec_st( dct_tr4v,  64, dct );
    vec_st( dct_tr5v,  80, dct );
    vec_st( dct_tr6v,  96, dct );
    vec_st( dct_tr7v, 112, dct );
}
void x264_sub16x16_dct8_altivec( int16_t dct[4][64], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct8_altivec( dct[0], &pix1[0], &pix2[0] );
    x264_sub8x8_dct8_altivec( dct[1], &pix1[8], &pix2[8] );
    x264_sub8x8_dct8_altivec( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct8_altivec( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
#define IDCT_1D_ALTIVEC(s0, s1, s2, s3, d0, d1, d2, d3) \
{                                                       \
    /* a0 = SRC(0) + SRC(2); */                         \
    vec_s16_t a0v = vec_add(s0, s2);                    \
    /* a1 = SRC(0) - SRC(2); */                         \
    vec_s16_t a1v = vec_sub(s0, s2);                    \
    /* a2 = (SRC(1)>>1) - SRC(3); */                    \
    vec_s16_t a2v = vec_sub(vec_sra(s1, onev), s3);     \
    /* a3 = (SRC(3)>>1) + SRC(1); */                    \
    vec_s16_t a3v = vec_add(vec_sra(s3, onev), s1);     \
    /* DST(0, a0 + a3); */                              \
    d0 = vec_add(a0v, a3v);                             \
    /* DST(1, a1 + a2); */                              \
    d1 = vec_add(a1v, a2v);                             \
    /* DST(2, a1 - a2); */                              \
    d2 = vec_sub(a1v, a2v);                             \
    /* DST(3, a0 - a3); */                              \
    d3 = vec_sub(a0v, a3v);                             \
}
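/* Load 4 reconstructed pixels from dst, zero-extend to 16 bit, add the
 * residual, clip back to 8 bit and write the 4 bytes with vec_ste. */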
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
    vdst_orig = vec_ld(0, dst);                      \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
    vdst_ss = (vec_s16_t)vec_mergeh(zero_u8v, vdst); \
    va = vec_add(va, vdst_ss);                       \
    va_u8 = vec_s16_to_u8(va);                       \
    va_u32 = vec_splat((vec_u32_t)va_u8, 0);         \
    vec_ste(va_u32, element, (uint32_t*)dst);
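/* Add one >>6-scaled IDCT row to 4 pixels of dest: unaligned load via
 * vec_perm, widen, saturating add, clip to 8 bit, then store the 4 result
 * bytes with a single vec_ste. */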
#define ALTIVEC_STORE4_SUM_CLIP(dest, idctv, perm_ldv)          \
{                                                               \
    /* unaligned load */                                        \
    vec_u8_t lv = vec_ld(0, dest);                              \
    vec_u8_t dstv = vec_perm(lv, zero_u8v, (vec_u8_t)perm_ldv); \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                  \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv);    \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);   \
    vec_u8_t idstsum8 = vec_s16_to_u8(idstsum);                 \
    /* unaligned store */                                       \
    vec_u32_t bodyv = vec_splat((vec_u32_t)idstsum8, 0);        \
    int element = ((unsigned long)dest & 0xf) >> 2;             \
    vec_ste(bodyv, element, (uint32_t *)dest);                  \
}
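/* 4x4 inverse transform and add: each 16-byte load of dct holds two rows,
 * so vec_sld splits them; IDCT one dimension, transpose, IDCT the other,
 * then add the >>6-scaled result to dst. */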
void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[16] )
{
    vec_u16_t onev = vec_splat_u16(1);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3;

    s0 = vec_ld( 0x00, dct );
    s1 = vec_sld( s0, s0, 8 );
    s2 = vec_ld( 0x10, dct );
    s3 = vec_sld( s2, s2, 8 );

    vec_s16_t d0, d1, d2, d3;
    IDCT_1D_ALTIVEC( s0, s1, s2, s3, d0, d1, d2, d3 );

    vec_s16_t tr0, tr1, tr2, tr3;

    VEC_TRANSPOSE_4( d0, d1, d2, d3, tr0, tr1, tr2, tr3 );

    vec_s16_t idct0, idct1, idct2, idct3;
    IDCT_1D_ALTIVEC( tr0, tr1, tr2, tr3, idct0, idct1, idct2, idct3 );

    vec_u8_t perm_ldv = vec_lvsl( 0, dst );
    vec_u16_t sixv = vec_splat_u16(6);

    ALTIVEC_STORE4_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2, perm_ldv );
    ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3, perm_ldv );
}
void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][16] )
{
    x264_add4x4_idct_altivec( &p_dst[0], dct[0] );
    x264_add4x4_idct_altivec( &p_dst[4], dct[1] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}
void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][16] )
{
    x264_add8x8_idct_altivec( &p_dst[0], &dct[0] );
    x264_add8x8_idct_altivec( &p_dst[8], &dct[4] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+0], &dct[8] );
    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
}
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) \
{ \
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16_t b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}
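/* Add one >>6-scaled IDCT8 row to 8 pixels of dest with unaligned
 * load/store: permute the source pixels into place, widen, saturating add,
 * clip, then merge the 8 result bytes back with vec_sel so neighbouring
 * bytes are left untouched. */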
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) \
{                                                            \
    /* unaligned load */                                     \
    vec_u8_t hv = vec_ld( 0, dest );                         \
    vec_u8_t lv = vec_ld( 7, dest );                         \
    vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv );  \
    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);               \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);\
    vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum);      \
    /* unaligned store */                                    \
    vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv );   \
    lv = vec_sel( lv, bodyv, edgelv );                       \
    vec_st( lv, 7, dest );                                   \
    hv = vec_ld( 0, dest );                                  \
    vec_u8_t edgehv = vec_perm( zero_u8v, sel, perm_stv );   \
    hv = vec_sel( hv, bodyv, edgehv );                       \
    vec_st( hv, 0, dest );                                   \
}
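/* 8x8 inverse transform and add: IDCT8 over one dimension, transpose,
 * IDCT8 over the other, then add each >>6-scaled row to the destination. */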
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[64] )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    dct[0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;

    s0 = vec_ld(0x00, dct);
    s1 = vec_ld(0x10, dct);
    s2 = vec_ld(0x20, dct);
    s3 = vec_ld(0x30, dct);
    s4 = vec_ld(0x40, dct);
    s5 = vec_ld(0x50, dct);
    s6 = vec_ld(0x60, dct);
    s7 = vec_ld(0x70, dct);

    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);

    vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;

    VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
                     tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7);

    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
    IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);
    vec_u16_t sixv = vec_splat_u16(6);
    const vec_u8_t sel = (vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*FDEC_STRIDE], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*FDEC_STRIDE], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*FDEC_STRIDE], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*FDEC_STRIDE], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*FDEC_STRIDE], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*FDEC_STRIDE], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*FDEC_STRIDE], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
}
void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][64] )
{
    x264_add8x8_idct8_altivec( &dst[0], dct[0] );
    x264_add8x8_idct8_altivec( &dst[8], dct[1] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}
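/* Frame zigzag of one 4x4 block: the scan order is baked into sel0/sel1,
 * so the reorder is just two vec_perm lookups. */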
void x264_zigzag_scan_4x4_frame_altivec( int16_t level[16], int16_t dct[16] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, dct);
    dct1v = vec_ld(0x10, dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,8,9,2,3,4,5,10,11,16,17,24,25,18,19);
    const vec_u8_t sel1 = (vec_u8_t) CV(12,13,6,7,14,15,20,21,26,27,28,29,22,23,30,31);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = vec_perm( dct0v, dct1v, sel1 );

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}
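/* Field zigzag of one 4x4 block: only coefficients 2-5 change position;
 * everything else, including the whole second vector, is copied through. */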
void x264_zigzag_scan_4x4_field_altivec( int16_t level[16], int16_t dct[16] )
{
    vec_s16_t dct0v, dct1v;
    vec_s16_t tmp0v, tmp1v;

    dct0v = vec_ld(0x00, dct);
    dct1v = vec_ld(0x10, dct);

    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,2,3,8,9,4,5,6,7,10,11,12,13,14,15);

    tmp0v = vec_perm( dct0v, dct1v, sel0 );
    tmp1v = dct1v;

    vec_st( tmp0v, 0x00, level );
    vec_st( tmp1v, 0x10, level );
}
#endif // !HIGH_BIT_DEPTH