/*****************************************************************************
 * dct.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003 Laurent Aimar
 *
 * Authors: Eric Petit <titer@m0k.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/
#include "common/common.h"
#include "ppccommon.h"
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
    b1 = vec_add( a0, a3 );              \
    b3 = vec_add( a1, a2 );              \
    b0 = vec_add( b1, b3 );              \
    b2 = vec_sub( b1, b3 );              \
    a0 = vec_sub( a0, a3 );              \
    a1 = vec_sub( a1, a2 );              \
    b1 = vec_add( a0, a0 );              \
    b1 = vec_add( b1, a1 );              \
    b3 = vec_sub( a0, a1 );              \
    b3 = vec_sub( b3, a1 )
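
/* For reference: one VEC_DCT pass applies the scalar 4-point H.264
 * forward-transform butterfly to four coefficient vectors at once:
 *   b0 =   a0 +   a1 +   a2 +   a3
 *   b1 = 2*a0 +   a1 -   a2 - 2*a3
 *   b2 =   a0 -   a1 -   a2 +   a3
 *   b3 =   a0 - 2*a1 + 2*a2 -   a3
 */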
void x264_sub4x4_dct_altivec( int16_t dct[4][4],
                              uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v;

    vec_u8_t permHighv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 4, dct3v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_TRANSPOSE_4( tmp0v, tmp1v, tmp2v, tmp3v,
                     dct0v, dct1v, dct2v, dct3v );
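    /* permHighv packs the first four int16 coefficients of two row
     * vectors into a single aligned 16-byte store. */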
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );

    vec_st( vec_perm( tmp0v, tmp1v, permHighv ),  0, (int16_t*)dct );
    vec_st( vec_perm( tmp2v, tmp3v, permHighv ), 16, (int16_t*)dct );
}
void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
                              uint8_t *pix1, uint8_t *pix2 )
{
    PREP_DIFF_8BYTEALIGNED;
    vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
    vec_s16_t tmp0v, tmp1v, tmp2v, tmp3v, tmp4v, tmp5v, tmp6v, tmp7v;

    vec_u8_t permHighv, permLowv;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );
    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
    VEC_TRANSPOSE_8( tmp0v, tmp1v, tmp2v, tmp3v,
                     tmp4v, tmp5v, tmp6v, tmp7v,
                     dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );
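    /* The high/low permutes below split each 8-wide coefficient vector into
     * its two 4-element halves, so the final stores lay the result out as
     * four consecutive 4x4 blocks in dct[0..3]. */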
    permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
    permLowv  = (vec_u8_t) CV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F);

    VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
    VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );

    vec_st( vec_perm( tmp0v, tmp1v, permHighv ),   0, (int16_t*)dct );
    vec_st( vec_perm( tmp2v, tmp3v, permHighv ),  16, (int16_t*)dct );
    vec_st( vec_perm( tmp4v, tmp5v, permHighv ),  32, (int16_t*)dct );
    vec_st( vec_perm( tmp6v, tmp7v, permHighv ),  48, (int16_t*)dct );
    vec_st( vec_perm( tmp0v, tmp1v, permLowv ),   64, (int16_t*)dct );
    vec_st( vec_perm( tmp2v, tmp3v, permLowv ),   80, (int16_t*)dct );
    vec_st( vec_perm( tmp4v, tmp5v, permLowv ),   96, (int16_t*)dct );
    vec_st( vec_perm( tmp6v, tmp7v, permLowv ),  112, (int16_t*)dct );
}
void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
                                uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0],               &pix2[0] );
    x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8],               &pix2[8] );
    x264_sub8x8_dct_altivec( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct_altivec( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
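
/* For context (a sketch, not part of this file): these kernels are selected
 * through x264's DCT function table, roughly as below, assuming the usual
 * x264_dct_function_t fields and the X264_CPU_ALTIVEC capability flag:
 *
 *     if( cpu&X264_CPU_ALTIVEC )
 *     {
 *         dctf->sub4x4_dct   = x264_sub4x4_dct_altivec;
 *         dctf->sub8x8_dct   = x264_sub8x8_dct_altivec;
 *         dctf->sub16x16_dct = x264_sub16x16_dct_altivec;
 *     }
 */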
/****************************************************************************
 * 8x8 transform:
 ****************************************************************************/
/* DCT8_1D unrolled by 8 in Altivec */
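/* Note: the macro expects vec_u16_t onev and twov (element-wise 1 and 2)
 * to be declared in the enclosing scope; they supply the shift amounts
 * for the >>1 and >>2 steps. */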
#define DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v ) \
{ \
    /* int s07 = SRC(0) + SRC(7); */ \
    vec_s16_t s07v = vec_add( dct0v, dct7v ); \
    /* int s16 = SRC(1) + SRC(6); */ \
    vec_s16_t s16v = vec_add( dct1v, dct6v ); \
    /* int s25 = SRC(2) + SRC(5); */ \
    vec_s16_t s25v = vec_add( dct2v, dct5v ); \
    /* int s34 = SRC(3) + SRC(4); */ \
    vec_s16_t s34v = vec_add( dct3v, dct4v ); \
\
    /* int a0 = s07 + s34; */ \
    vec_s16_t a0v = vec_add( s07v, s34v ); \
    /* int a1 = s16 + s25; */ \
    vec_s16_t a1v = vec_add( s16v, s25v ); \
    /* int a2 = s07 - s34; */ \
    vec_s16_t a2v = vec_sub( s07v, s34v ); \
    /* int a3 = s16 - s25; */ \
    vec_s16_t a3v = vec_sub( s16v, s25v ); \
\
    /* int d07 = SRC(0) - SRC(7); */ \
    vec_s16_t d07v = vec_sub( dct0v, dct7v ); \
    /* int d16 = SRC(1) - SRC(6); */ \
    vec_s16_t d16v = vec_sub( dct1v, dct6v ); \
    /* int d25 = SRC(2) - SRC(5); */ \
    vec_s16_t d25v = vec_sub( dct2v, dct5v ); \
    /* int d34 = SRC(3) - SRC(4); */ \
    vec_s16_t d34v = vec_sub( dct3v, dct4v ); \
\
    /* int a4 = d16 + d25 + (d07 + (d07>>1)); */ \
    vec_s16_t a4v = vec_add( vec_add( d16v, d25v ), vec_add( d07v, vec_sra( d07v, onev ) ) ); \
    /* int a5 = d07 - d34 - (d25 + (d25>>1)); */ \
    vec_s16_t a5v = vec_sub( vec_sub( d07v, d34v ), vec_add( d25v, vec_sra( d25v, onev ) ) ); \
    /* int a6 = d07 + d34 - (d16 + (d16>>1)); */ \
    vec_s16_t a6v = vec_sub( vec_add( d07v, d34v ), vec_add( d16v, vec_sra( d16v, onev ) ) ); \
    /* int a7 = d16 - d25 + (d34 + (d34>>1)); */ \
    vec_s16_t a7v = vec_add( vec_sub( d16v, d25v ), vec_add( d34v, vec_sra( d34v, onev ) ) ); \
\
    /* DST(0) = a0 + a1; */ \
    dct0v = vec_add( a0v, a1v ); \
    /* DST(1) = a4 + (a7>>2); */ \
    dct1v = vec_add( a4v, vec_sra( a7v, twov ) ); \
    /* DST(2) = a2 + (a3>>1); */ \
    dct2v = vec_add( a2v, vec_sra( a3v, onev ) ); \
    /* DST(3) = a5 + (a6>>2); */ \
    dct3v = vec_add( a5v, vec_sra( a6v, twov ) ); \
    /* DST(4) = a0 - a1; */ \
    dct4v = vec_sub( a0v, a1v ); \
    /* DST(5) = a6 - (a5>>2); */ \
    dct5v = vec_sub( a6v, vec_sra( a5v, twov ) ); \
    /* DST(6) = (a2>>1) - a3; */ \
    dct6v = vec_sub( vec_sra( a2v, onev ), a3v ); \
    /* DST(7) = (a4>>2) - a7; */ \
    dct7v = vec_sub( vec_sra( a4v, twov ), a7v ); \
}
void x264_sub8x8_dct8_altivec( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_add( onev, onev );

    PREP_DIFF_8BYTEALIGNED;

    vec_s16_t dct0v, dct1v, dct2v, dct3v,
              dct4v, dct5v, dct6v, dct7v;

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct0v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct1v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct2v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct3v );

    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct4v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct5v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct6v );
    VEC_DIFF_H_8BYTE_ALIGNED( pix1, FENC_STRIDE, pix2, FDEC_STRIDE, 8, dct7v );

    DCT8_1D_ALTIVEC( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v );

    vec_s16_t dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
              dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v;

    VEC_TRANSPOSE_8( dct0v, dct1v, dct2v, dct3v,
                     dct4v, dct5v, dct6v, dct7v,
                     dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                     dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );

    vec_st( dct_tr0v,   0, (signed short *)dct );
    vec_st( dct_tr1v,  16, (signed short *)dct );
    vec_st( dct_tr2v,  32, (signed short *)dct );
    vec_st( dct_tr3v,  48, (signed short *)dct );

    vec_st( dct_tr4v,  64, (signed short *)dct );
    vec_st( dct_tr5v,  80, (signed short *)dct );
    vec_st( dct_tr6v,  96, (signed short *)dct );
    vec_st( dct_tr7v, 112, (signed short *)dct );
}
void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 )
{
    x264_sub8x8_dct8_altivec( dct[0], &pix1[0],               &pix2[0] );
    x264_sub8x8_dct8_altivec( dct[1], &pix1[8],               &pix2[8] );
    x264_sub8x8_dct8_altivec( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    x264_sub8x8_dct8_altivec( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}
/****************************************************************************
 * IDCT transform:
 ****************************************************************************/
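
/* IDCT8_1D_ALTIVEC: one pass of the 8-point inverse-transform butterfly on
 * eight coefficient vectors at once. Like DCT8_1D_ALTIVEC, it expects the
 * onev and twov shift splats to be declared in the enclosing scope. */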
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) \
{ \
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16_t a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16_t a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16_t a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16_t a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16_t b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16_t b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16_t b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16_t b6v = vec_sub(a0v, a6v); \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16_t a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16_t a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    /* a5 = (SRC(7)-SRC(1)) + (SRC(5) + (SRC(5)>>1)); */ \
    vec_s16_t a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    /* a7 = (SRC(5)+SRC(3)) + (SRC(1) + (SRC(1)>>1)); */ \
    vec_s16_t a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    /* b1 = (a7>>2) + a1; */ \
    vec_s16_t b1v = vec_add( vec_sra(a7v, twov), a1v ); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16_t b3v = vec_add( a3v, vec_sra(a5v, twov) ); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16_t b5v = vec_sub( vec_sra(a3v, twov), a5v ); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16_t b7v = vec_sub( a7v, vec_sra(a1v, twov) ); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}
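
/* ALTIVEC_STORE_SUM_CLIP: shifts one IDCT row right by six bits, adds it to
 * the eight prediction pixels at dest, saturates the sums to 8 bits, and
 * writes the result back with an unaligned read-modify-write: load the two
 * covering aligned vectors, splice the new bytes in with vec_sel, and store
 * both halves (hv is reloaded because the store of lv may overlap it). */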
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) \
{ \
    /* unaligned load */ \
    vec_u8_t hv = vec_ld( 0, dest ); \
    vec_u8_t lv = vec_ld( 7, dest ); \
    vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \
    vec_s16_t idct_sh6 = vec_sra( idctv, sixv ); \
    vec_u16_t dst16 = (vec_u16_t)vec_mergeh( zero_u8v, dstv ); \
    vec_s16_t idstsum = vec_adds( idct_sh6, (vec_s16_t)dst16 ); \
    vec_u8_t idstsum8 = vec_packsu( zero_s16v, idstsum ); \
    /* unaligned store */ \
    vec_u8_t bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
    vec_u8_t edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
    lv = vec_sel( lv, bodyv, edgelv ); \
    vec_st( lv, 7, dest ); \
    hv = vec_ld( 0, dest ); \
    vec_u8_t edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
    hv = vec_sel( hv, bodyv, edgehv ); \
    vec_st( hv, 0, dest ); \
}
void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] )
{
    vec_u16_t onev = vec_splat_u16(1);
    vec_u16_t twov = vec_splat_u16(2);

    dct[0][0] += 32; // rounding for the >>6 at the end

    vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);

    vec_s16_t tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7;

    VEC_TRANSPOSE_8( d0, d1, d2, d3, d4, d5, d6, d7,
                     tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7 );

    vec_s16_t idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
    IDCT8_1D_ALTIVEC(tr0, tr1, tr2, tr3, tr4, tr5, tr6, tr7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    vec_u8_t perm_ldv = vec_lvsl(0, dst);
    vec_u8_t perm_stv = vec_lvsr(8, dst);
    vec_u16_t sixv = vec_splat_u16(6);
    const vec_u8_t sel = (vec_u8_t) CV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*FDEC_STRIDE], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*FDEC_STRIDE], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*FDEC_STRIDE], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*FDEC_STRIDE], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*FDEC_STRIDE], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*FDEC_STRIDE], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*FDEC_STRIDE], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
}
void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] )
{
    x264_add8x8_idct8_altivec( &dst[0],               dct[0] );
    x264_add8x8_idct8_altivec( &dst[8],               dct[1] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
    x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}