/*****************************************************************************
 * idctaltivec.c : AltiVec IDCT module
 *****************************************************************************
 * Copyright (C) 2001 VideoLAN
 * $Id: idctaltivec.c,v 1.27 2002/07/31 20:56:51 sam Exp $
 *
 * Authors: Christophe Massiot <massiot@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
 *****************************************************************************/

#ifndef __BUILD_ALTIVEC_ASM__

/*****************************************************************************
 * Preamble
 *****************************************************************************/
#include <stdlib.h>                                      /* malloc(), free() */

#include <vlc/vlc.h>

#ifdef HAVE_INTTYPES_H
#   include <inttypes.h>                                       /* int16_t .. */
#endif

#include "idct.h"

static int Open( vlc_object_t *p_this );

/*****************************************************************************
 * Module descriptor
 *****************************************************************************/
vlc_module_begin();
    set_description( _("AltiVec IDCT module") );
    set_capability( "idct", 200 );
    add_shortcut( "altivec" );
    add_requirement( ALTIVEC );
    set_callbacks( Open, NULL );
vlc_module_end();

/*****************************************************************************
 * NormScan : This IDCT uses reordered coeffs, so we patch the scan table
 *****************************************************************************/
static void NormScan( u8 ppi_scan[2][64] )
{
    int i, j;

    for( i = 0; i < 64; i++ )
    {
        j = ppi_scan[0][i];
        ppi_scan[0][i] = (j >> 3) | ((j & 7) << 3);

        j = ppi_scan[1][i];
        ppi_scan[1][i] = (j >> 3) | ((j & 7) << 3);
    }
}

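/* The expression above swaps the row and column fields of each scan-table
 * entry, transposing the scan order to match the transposed coefficient
 * layout this IDCT expects.  For instance, entry 10 (row 1, column 2)
 * becomes (10 >> 3) | ((10 & 7) << 3) = 1 | 16 = 17 (row 2, column 1). */
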
/*****************************************************************************
 * Placeholders for unused functions
 *****************************************************************************/
static void InitIDCT( void ** pp_idct_data )
{
}

/*****************************************************************************
 *****************************************************************************/

#ifndef CAN_COMPILE_C_ALTIVEC

static int16_t constants[5][8] ATTR_ALIGN(16) = {
    {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
    {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
    {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
    {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
    {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
};

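/* A reading of these constants (my interpretation, not from the original
 * sources): they are Q15 fixed-point factors for vmhraddshs / vec_mradds,
 * which computes saturate(((a * b) + 0x4000) >> 15 + c).  In the first row,
 * 23170 is cos(pi/4) * 2^15, and 13573, 6518 and 21895 match tan(pi/8),
 * tan(pi/16) and tan(3*pi/16) in the same format; the trailing {32, 31}
 * pair is splatted as one 32-bit word to build the rounding bias applied
 * before the final shift right by 6.  Rows 1 to 4 appear to be per-row
 * input prescale factors. */
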
/*
 * The asm code is generated with:
 *
 * gcc-2.95 -fvec -D__BUILD_ALTIVEC_ASM__ -O9 -fomit-frame-pointer -mregnames -S
 *      idct_altivec.c
 *
 * awk '{args=""; len=split ($2, arg, ",");
 *      for (i=1; i<=len; i++) { a=arg[i]; if (i<len) a=a",";
 *              args = args sprintf ("%-6s", a) }
 *      printf ("\t\"\t%-16s%-24s\\n\"\n", $1, args) }' idct_altivec.s |
 * unexpand -a
 *
 * I then do some simple trimming on the function prolog/trailers
 */

void idct_block_copy_altivec (int16_t * block, uint8_t * dest, int stride)
{
    asm ("                                      \n"
        "#      stwu       %r1, -128(%r1)       \n"
        "#      mflr       %r0                  \n"
        "#      stw        %r0, 132(%r1)        \n"
        "#      addi       %r0, %r1, 128        \n"

        "       addi       %r9, %r3, 112        \n"
        "       vspltish   %v25, 4              \n"
        "       vxor       %v13, %v13, %v13     \n"
        "       lis        %r10, constants@ha   \n"
        "       lvx        %v1, 0, %r9          \n"
        "       la         %r10, constants@l(%r10) \n"
        "       lvx        %v5, 0, %r3          \n"
        "       addi       %r9, %r3, 16         \n"
        "       lvx        %v8, 0, %r10         \n"
        "       addi       %r11, %r10, 32       \n"
        "       lvx        %v12, 0, %r9         \n"
        "       lvx        %v6, 0, %r11         \n"
        "       addi       %r8, %r3, 48         \n"
        "       vslh       %v1, %v1, %v25       \n"
        "       addi       %r9, %r3, 80         \n"
        "       lvx        %v11, 0, %r8         \n"
        "       vslh       %v5, %v5, %v25       \n"
        "       lvx        %v0, 0, %r9          \n"
        "       addi       %r11, %r10, 64       \n"
        "       vsplth     %v3, %v8, 2          \n"
        "       lvx        %v7, 0, %r11         \n"
        "       addi       %r9, %r3, 96         \n"
        "       vslh       %v12, %v12, %v25     \n"
        "       vmhraddshs %v27, %v1, %v6, %v13 \n"
        "       addi       %r8, %r3, 32         \n"
        "       vsplth     %v2, %v8, 5          \n"
        "       lvx        %v1, 0, %r9          \n"
        "       vslh       %v11, %v11, %v25     \n"
        "       addi       %r3, %r3, 64         \n"
        "       lvx        %v9, 0, %r8          \n"
        "       addi       %r9, %r10, 48        \n"
        "       vslh       %v0, %v0, %v25       \n"
        "       lvx        %v4, 0, %r9          \n"
        "       vmhraddshs %v31, %v12, %v6, %v13 \n"
        "       addi       %r10, %r10, 16       \n"
        "       vmhraddshs %v30, %v0, %v7, %v13 \n"
        "       lvx        %v10, 0, %r3         \n"
        "       vsplth     %v19, %v8, 3         \n"
        "       vmhraddshs %v15, %v11, %v7, %v13 \n"
        "       lvx        %v12, 0, %r10        \n"
        "       vsplth     %v6, %v8, 4          \n"
        "       vslh       %v1, %v1, %v25       \n"
        "       vsplth     %v11, %v8, 1         \n"
        "       addi       %r9, 0, 4            \n"
        "       vslh       %v9, %v9, %v25       \n"
        "       vsplth     %v7, %v8, 0          \n"
        "       vmhraddshs %v18, %v1, %v4, %v13 \n"
        "       vspltw     %v8, %v8, 3          \n"
        "       vsubshs    %v0, %v13, %v27      \n"
        "       vmhraddshs %v1, %v9, %v4, %v13  \n"
        "       vmhraddshs %v17, %v3, %v31, %v0 \n"
        "       vmhraddshs %v4, %v2, %v15, %v30 \n"
        "       vslh       %v10, %v10, %v25     \n"
        "       vmhraddshs %v9, %v5, %v12, %v13 \n"
        "       vspltish   %v25, 6              \n"
        "       vmhraddshs %v5, %v10, %v12, %v13 \n"
        "       vmhraddshs %v28, %v19, %v30, %v15 \n"
        "       vmhraddshs %v27, %v3, %v27, %v31 \n"
        "       vsubshs    %v0, %v13, %v18      \n"
        "       vmhraddshs %v18, %v11, %v18, %v1 \n"
        "       vaddshs    %v30, %v17, %v4      \n"
        "       vmhraddshs %v12, %v11, %v1, %v0 \n"
        "       vsubshs    %v4, %v17, %v4       \n"
        "       vaddshs    %v10, %v9, %v5       \n"
        "       vsubshs    %v17, %v27, %v28     \n"
        "       vaddshs    %v27, %v27, %v28     \n"
        "       vsubshs    %v1, %v9, %v5        \n"
        "       vaddshs    %v28, %v10, %v18     \n"
        "       vsubshs    %v18, %v10, %v18     \n"
        "       vaddshs    %v10, %v1, %v12      \n"
        "       vsubshs    %v1, %v1, %v12       \n"
        "       vsubshs    %v12, %v17, %v4      \n"
        "       vaddshs    %v4, %v17, %v4       \n"
        "       vmhraddshs %v5, %v7, %v12, %v1  \n"
        "       vmhraddshs %v26, %v6, %v4, %v10 \n"
        "       vmhraddshs %v29, %v6, %v12, %v1 \n"
        "       vmhraddshs %v14, %v7, %v4, %v10 \n"
        "       vsubshs    %v12, %v18, %v30     \n"
        "       vaddshs    %v9, %v28, %v27      \n"
        "       vaddshs    %v16, %v18, %v30     \n"
        "       vsubshs    %v10, %v28, %v27     \n"
        "       vmrglh     %v31, %v9, %v12      \n"
        "       vmrglh     %v30, %v5, %v26      \n"
        "       vmrglh     %v15, %v14, %v29     \n"
        "       vmrghh     %v5, %v5, %v26       \n"
        "       vmrglh     %v27, %v16, %v10     \n"
        "       vmrghh     %v9, %v9, %v12       \n"
        "       vmrghh     %v18, %v16, %v10     \n"
        "       vmrghh     %v1, %v14, %v29      \n"
        "       vmrglh     %v14, %v9, %v5       \n"
        "       vmrglh     %v16, %v31, %v30     \n"
        "       vmrglh     %v10, %v15, %v27     \n"
        "       vmrghh     %v9, %v9, %v5        \n"
        "       vmrghh     %v26, %v15, %v27     \n"
        "       vmrglh     %v27, %v16, %v10     \n"
        "       vmrghh     %v12, %v1, %v18      \n"
        "       vmrglh     %v29, %v1, %v18      \n"
        "       vsubshs    %v0, %v13, %v27      \n"
        "       vmrghh     %v5, %v31, %v30      \n"
        "       vmrglh     %v31, %v9, %v12      \n"
        "       vmrglh     %v30, %v5, %v26      \n"
        "       vmrglh     %v15, %v14, %v29     \n"
        "       vmhraddshs %v17, %v3, %v31, %v0 \n"
        "       vmrghh     %v18, %v16, %v10     \n"
        "       vmhraddshs %v27, %v3, %v27, %v31 \n"
        "       vmhraddshs %v4, %v2, %v15, %v30 \n"
        "       vmrghh     %v1, %v14, %v29      \n"
        "       vmhraddshs %v28, %v19, %v30, %v15 \n"
        "       vmrghh     %v0, %v9, %v12       \n"
        "       vsubshs    %v13, %v13, %v18     \n"
        "       vmrghh     %v5, %v5, %v26       \n"
        "       vmhraddshs %v18, %v11, %v18, %v1 \n"
        "       vaddshs    %v9, %v0, %v8        \n"
        "       vaddshs    %v30, %v17, %v4      \n"
        "       vmhraddshs %v12, %v11, %v1, %v13 \n"
        "       vsubshs    %v4, %v17, %v4       \n"
        "       vaddshs    %v10, %v9, %v5       \n"
        "       vsubshs    %v17, %v27, %v28     \n"
        "       vaddshs    %v27, %v27, %v28     \n"
        "       vsubshs    %v1, %v9, %v5        \n"
        "       vaddshs    %v28, %v10, %v18     \n"
        "       vsubshs    %v18, %v10, %v18     \n"
        "       vaddshs    %v10, %v1, %v12      \n"
        "       vsubshs    %v1, %v1, %v12       \n"
        "       vsubshs    %v12, %v17, %v4      \n"
        "       vaddshs    %v4, %v17, %v4       \n"
        "       vaddshs    %v9, %v28, %v27      \n"
        "       vmhraddshs %v14, %v7, %v4, %v10 \n"
        "       vsrah      %v9, %v9, %v25       \n"
        "       vmhraddshs %v5, %v7, %v12, %v1  \n"
        "       vpkshus    %v0, %v9, %v9        \n"
        "       vmhraddshs %v29, %v6, %v12, %v1 \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       vaddshs    %v16, %v18, %v30     \n"
        "       vsrah      %v31, %v14, %v25     \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       add        %r4, %r4, %r5        \n"
        "       vsrah      %v15, %v16, %v25     \n"
        "       vpkshus    %v0, %v31, %v31      \n"
        "       vsrah      %v1, %v5, %v25       \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       vsubshs    %v12, %v18, %v30     \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       vmhraddshs %v26, %v6, %v4, %v10 \n"
        "       vpkshus    %v0, %v1, %v1        \n"
        "       add        %r4, %r4, %r5        \n"
        "       vsrah      %v5, %v12, %v25      \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       vsrah      %v30, %v29, %v25     \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       vsubshs    %v10, %v28, %v27     \n"
        "       vpkshus    %v0, %v15, %v15      \n"
        "       add        %r4, %r4, %r5        \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       vsrah      %v18, %v26, %v25     \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       vsrah      %v27, %v10, %v25     \n"
        "       vpkshus    %v0, %v5, %v5        \n"
        "       add        %r4, %r4, %r5        \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       vpkshus    %v0, %v30, %v30      \n"
        "       add        %r4, %r4, %r5        \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       vpkshus    %v0, %v18, %v18      \n"
        "       add        %r4, %r4, %r5        \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       stvewx     %v0, %r9, %r4        \n"
        "       add        %r4, %r4, %r5        \n"
        "       vpkshus    %v0, %v27, %v27      \n"
        "       stvewx     %v0, 0, %r4          \n"
        "       stvewx     %v0, %r9, %r4        \n"

        "#      addi       %r0, %r1, 128        \n"
        "#      lwz        %r0, 132(%r1)        \n"
        "#      mtlr       %r0                  \n"
        "#      la         %r1, 128(%r1)        \n"

        "       blr                             \n"
        );
}

void idct_block_add_altivec (int16_t * block, uint8_t * dest, int stride)
{
    asm ("                                      \n"
        "#      stwu       %r1, -192(%r1)       \n"
        "#      mflr       %r0                  \n"
        "#      stw        %r0, 196(%r1)        \n"
        "#      addi       %r0, %r1, 192        \n"

        "       addi       %r9, %r3, 112        \n"
        "       vspltish   %v21, 4              \n"
        "       vxor       %v1, %v1, %v1        \n"
        "       lvx        %v13, 0, %r9         \n"
        "       lis        %r10, constants@ha   \n"
        "       vspltisw   %v3, -1              \n"
        "       la         %r10, constants@l(%r10) \n"
        "       lvx        %v5, 0, %r3          \n"
        "       addi       %r9, %r3, 16         \n"
        "       lvx        %v8, 0, %r10         \n"
        "       lvx        %v12, 0, %r9         \n"
        "       addi       %r11, %r10, 32       \n"
        "       lvx        %v6, 0, %r11         \n"
        "       addi       %r8, %r3, 48         \n"
        "       vslh       %v13, %v13, %v21     \n"
        "       addi       %r9, %r3, 80         \n"
        "       lvx        %v11, 0, %r8         \n"
        "       vslh       %v5, %v5, %v21       \n"
        "       lvx        %v0, 0, %r9          \n"
        "       addi       %r11, %r10, 64       \n"
        "       vsplth     %v2, %v8, 2          \n"
        "       lvx        %v7, 0, %r11         \n"
        "       vslh       %v12, %v12, %v21     \n"
        "       addi       %r9, %r3, 96         \n"
        "       vmhraddshs %v24, %v13, %v6, %v1 \n"
        "       addi       %r8, %r3, 32         \n"
        "       vsplth     %v17, %v8, 5         \n"
        "       lvx        %v13, 0, %r9         \n"
        "       vslh       %v11, %v11, %v21     \n"
        "       addi       %r3, %r3, 64         \n"
        "       lvx        %v10, 0, %r8         \n"
        "       vslh       %v0, %v0, %v21       \n"
        "       addi       %r9, %r10, 48        \n"
        "       vmhraddshs %v31, %v12, %v6, %v1 \n"
        "       lvx        %v4, 0, %r9          \n"
        "       addi       %r10, %r10, 16       \n"
        "       vmhraddshs %v26, %v0, %v7, %v1  \n"
        "       lvx        %v9, 0, %r3          \n"
        "       vsplth     %v16, %v8, 3         \n"
        "       vmhraddshs %v22, %v11, %v7, %v1 \n"
        "       lvx        %v6, 0, %r10         \n"
        "       lvsl       %v19, 0, %r4         \n"
        "       vsubshs    %v12, %v1, %v24      \n"
        "       lvsl       %v0, %r5, %r4        \n"
        "       vsplth     %v11, %v8, 1         \n"
        "       vslh       %v10, %v10, %v21     \n"
        "       vmrghb     %v19, %v3, %v19      \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vslh       %v13, %v13, %v21     \n"
        "       vmrghb     %v3, %v3, %v0        \n"
        "       addi       %r9, 0, 4            \n"
        "       vmhraddshs %v14, %v2, %v31, %v12 \n"
        "       vsplth     %v7, %v8, 0          \n"
        "       vmhraddshs %v23, %v13, %v4, %v1 \n"
        "       vsplth     %v18, %v8, 4         \n"
        "       vmhraddshs %v27, %v10, %v4, %v1 \n"
        "       vspltw     %v8, %v8, 3          \n"
        "       vmhraddshs %v12, %v17, %v22, %v26 \n"
        "       vperm      %v15, %v15, %v1, %v19 \n"
        "       vslh       %v9, %v9, %v21       \n"
        "       vmhraddshs %v10, %v5, %v6, %v1  \n"
        "       vspltish   %v21, 6              \n"
        "       vmhraddshs %v30, %v9, %v6, %v1  \n"
        "       vmhraddshs %v26, %v16, %v26, %v22 \n"
        "       vmhraddshs %v24, %v2, %v24, %v31 \n"
        "       vmhraddshs %v31, %v11, %v23, %v27 \n"
        "       vsubshs    %v0, %v1, %v23       \n"
        "       vaddshs    %v23, %v14, %v12     \n"
        "       vmhraddshs %v9, %v11, %v27, %v0 \n"
        "       vsubshs    %v12, %v14, %v12     \n"
        "       vaddshs    %v6, %v10, %v30      \n"
        "       vsubshs    %v14, %v24, %v26     \n"
        "       vaddshs    %v24, %v24, %v26     \n"
        "       vsubshs    %v13, %v10, %v30     \n"
        "       vaddshs    %v26, %v6, %v31      \n"
        "       vsubshs    %v31, %v6, %v31      \n"
        "       vaddshs    %v6, %v13, %v9       \n"
        "       vsubshs    %v13, %v13, %v9      \n"
        "       vsubshs    %v9, %v14, %v12      \n"
        "       vaddshs    %v12, %v14, %v12     \n"
        "       vmhraddshs %v30, %v7, %v9, %v13 \n"
        "       vmhraddshs %v25, %v18, %v12, %v6 \n"
        "       vmhraddshs %v28, %v18, %v9, %v13 \n"
        "       vmhraddshs %v29, %v7, %v12, %v6 \n"
        "       vaddshs    %v10, %v26, %v24     \n"
        "       vsubshs    %v5, %v31, %v23      \n"
        "       vsubshs    %v13, %v26, %v24     \n"
        "       vaddshs    %v4, %v31, %v23      \n"
        "       vmrglh     %v26, %v30, %v25     \n"
        "       vmrglh     %v31, %v10, %v5      \n"
        "       vmrglh     %v22, %v29, %v28     \n"
        "       vmrghh     %v30, %v30, %v25     \n"
        "       vmrglh     %v24, %v4, %v13      \n"
        "       vmrghh     %v10, %v10, %v5      \n"
        "       vmrghh     %v23, %v4, %v13      \n"
        "       vmrghh     %v27, %v29, %v28     \n"
        "       vmrglh     %v29, %v10, %v30     \n"
        "       vmrglh     %v4, %v31, %v26      \n"
        "       vmrglh     %v13, %v22, %v24     \n"
        "       vmrghh     %v10, %v10, %v30     \n"
        "       vmrghh     %v25, %v22, %v24     \n"
        "       vmrglh     %v24, %v4, %v13      \n"
        "       vmrghh     %v5, %v27, %v23      \n"
        "       vmrglh     %v28, %v27, %v23     \n"
        "       vsubshs    %v0, %v1, %v24       \n"
        "       vmrghh     %v30, %v31, %v26     \n"
        "       vmrglh     %v31, %v10, %v5      \n"
        "       vmrglh     %v26, %v30, %v25     \n"
        "       vmrglh     %v22, %v29, %v28     \n"
        "       vmhraddshs %v14, %v2, %v31, %v0 \n"
        "       vmrghh     %v23, %v4, %v13      \n"
        "       vmhraddshs %v24, %v2, %v24, %v31 \n"
        "       vmhraddshs %v12, %v17, %v22, %v26 \n"
        "       vmrghh     %v27, %v29, %v28     \n"
        "       vmhraddshs %v26, %v16, %v26, %v22 \n"
        "       vmrghh     %v0, %v10, %v5       \n"
        "       vmhraddshs %v31, %v11, %v23, %v27 \n"
        "       vmrghh     %v30, %v30, %v25     \n"
        "       vsubshs    %v13, %v1, %v23      \n"
        "       vaddshs    %v10, %v0, %v8       \n"
        "       vaddshs    %v23, %v14, %v12     \n"
        "       vsubshs    %v12, %v14, %v12     \n"
        "       vaddshs    %v6, %v10, %v30      \n"
        "       vsubshs    %v14, %v24, %v26     \n"
        "       vmhraddshs %v9, %v11, %v27, %v13 \n"
        "       vaddshs    %v24, %v24, %v26     \n"
        "       vaddshs    %v26, %v6, %v31      \n"
        "       vsubshs    %v13, %v10, %v30     \n"
        "       vaddshs    %v10, %v26, %v24     \n"
        "       vsubshs    %v31, %v6, %v31      \n"
        "       vaddshs    %v6, %v13, %v9       \n"
        "       vsrah      %v10, %v10, %v21     \n"
        "       vsubshs    %v13, %v13, %v9      \n"
        "       vaddshs    %v0, %v15, %v10      \n"
        "       vsubshs    %v9, %v14, %v12      \n"
        "       vaddshs    %v12, %v14, %v12     \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       vaddshs    %v4, %v31, %v23      \n"
        "       vmhraddshs %v29, %v7, %v12, %v6 \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       add        %r4, %r4, %r5        \n"
        "       vsubshs    %v5, %v31, %v23      \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vmhraddshs %v30, %v7, %v9, %v13 \n"
        "       vsrah      %v22, %v4, %v21      \n"
        "       vperm      %v15, %v15, %v1, %v3 \n"
        "       vmhraddshs %v28, %v18, %v9, %v13 \n"
        "       vsrah      %v31, %v29, %v21     \n"
        "       vsubshs    %v13, %v26, %v24     \n"
        "       vaddshs    %v0, %v15, %v31      \n"
        "       vsrah      %v27, %v30, %v21     \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       vsrah      %v30, %v5, %v21      \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       vsrah      %v26, %v28, %v21     \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       vmhraddshs %v25, %v18, %v12, %v6 \n"
        "       add        %r4, %r4, %r5        \n"
        "       vsrah      %v24, %v13, %v21     \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vperm      %v15, %v15, %v1, %v19 \n"
        "       vsrah      %v23, %v25, %v21     \n"
        "       vaddshs    %v0, %v15, %v27      \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       add        %r4, %r4, %r5        \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vperm      %v15, %v15, %v1, %v3 \n"
        "       vaddshs    %v0, %v15, %v22      \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       add        %r4, %r4, %r5        \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vperm      %v15, %v15, %v1, %v19 \n"
        "       vaddshs    %v0, %v15, %v30      \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       add        %r4, %r4, %r5        \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vperm      %v15, %v15, %v1, %v3 \n"
        "       vaddshs    %v0, %v15, %v26      \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       add        %r4, %r4, %r5        \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vperm      %v15, %v15, %v1, %v19 \n"
        "       vaddshs    %v0, %v15, %v23      \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       stvewx     %v15, %r9, %r4       \n"
        "       add        %r4, %r4, %r5        \n"
        "       lvx        %v15, 0, %r4         \n"
        "       vperm      %v15, %v15, %v1, %v3 \n"
        "       vaddshs    %v0, %v15, %v24      \n"
        "       vpkshus    %v15, %v0, %v0       \n"
        "       stvewx     %v15, 0, %r4         \n"
        "       stvewx     %v15, %r9, %r4       \n"

        "#      addi       %r0, %r1, 192        \n"
        "#      lwz        %r0, 196(%r1)        \n"
        "#      mtlr       %r0                  \n"
        "#      la         %r1, 192(%r1)        \n"

        "       blr                             \n"
        );
}

#endif /* !CAN_COMPILE_C_ALTIVEC */
#endif /* __BUILD_ALTIVEC_ASM__ */

#if defined(CAN_COMPILE_C_ALTIVEC) || defined(__BUILD_ALTIVEC_ASM__)

#define vector_s16_t vector signed short
#define vector_u16_t vector unsigned short
#define vector_s8_t  vector signed char
#define vector_u8_t  vector unsigned char
#define vector_s32_t vector signed int
#define vector_u32_t vector unsigned int

#define IDCT_HALF                                           \
    /* 1st stage */                                         \
    t1 = vec_mradds (a1, vx7, vx1);                         \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7));        \
    t7 = vec_mradds (a2, vx5, vx3);                         \
    t3 = vec_mradds (ma2, vx3, vx5);                        \
                                                            \
    /* 2nd stage */                                         \
    t5 = vec_adds (vx0, vx4);                               \
    t0 = vec_subs (vx0, vx4);                               \
    t2 = vec_mradds (a0, vx6, vx2);                         \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6));        \
    t6 = vec_adds (t8, t3);                                 \
    t3 = vec_subs (t8, t3);                                 \
    t8 = vec_subs (t1, t7);                                 \
    t1 = vec_adds (t1, t7);                                 \
                                                            \
    /* 3rd stage */                                         \
    t7 = vec_adds (t5, t2);                                 \
    t2 = vec_subs (t5, t2);                                 \
    t5 = vec_adds (t0, t4);                                 \
    t0 = vec_subs (t0, t4);                                 \
    t4 = vec_subs (t8, t3);                                 \
    t3 = vec_adds (t8, t3);                                 \
                                                            \
    /* 4th stage */                                         \
    vy0 = vec_adds (t7, t1);                                \
    vy7 = vec_subs (t7, t1);                                \
    vy1 = vec_mradds (c4, t3, t5);                          \
    vy6 = vec_mradds (mc4, t3, t5);                         \
    vy2 = vec_mradds (c4, t4, t0);                          \
    vy5 = vec_mradds (mc4, t4, t0);                         \
    vy3 = vec_adds (t2, t6);                                \
    vy4 = vec_subs (t2, t6);

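/* Structure note (mine): IDCT_HALF is one 8-point 1-D IDCT applied to
 * eight vectors at once, i.e. to all eight columns in parallel.  Stage 1
 * computes the odd-part rotations with vec_mradds (fixed-point
 * multiply-round-add), stages 2 and 3 are the even/odd butterflies, and
 * stage 4 recombines both halves into vy0..vy7. */
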
#define IDCT                                                            \
    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7;                \
    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7;                \
    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias;                  \
    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8;                    \
    vector_u16_t shift;                                                 \
                                                                        \
    c4 = vec_splat (constants[0], 0);                                   \
    a0 = vec_splat (constants[0], 1);                                   \
    a1 = vec_splat (constants[0], 2);                                   \
    a2 = vec_splat (constants[0], 3);                                   \
    mc4 = vec_splat (constants[0], 4);                                  \
    ma2 = vec_splat (constants[0], 5);                                  \
    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3);     \
                                                                        \
    zero = vec_splat_s16 (0);                                           \
    shift = vec_splat_u16 (4);                                          \
                                                                        \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero);    \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero);    \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero);    \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero);    \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero);    \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero);    \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero);    \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero);    \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    vx0 = vec_mergeh (vy0, vy4);                                        \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    vy0 = vec_mergeh (vx0, vx4);                                        \
    vy1 = vec_mergel (vx0, vx4);                                        \
    vy2 = vec_mergeh (vx1, vx5);                                        \
    vy3 = vec_mergel (vx1, vx5);                                        \
    vy4 = vec_mergeh (vx2, vx6);                                        \
    vy5 = vec_mergel (vx2, vx6);                                        \
    vy6 = vec_mergeh (vx3, vx7);                                        \
    vy7 = vec_mergel (vx3, vx7);                                        \
                                                                        \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias);                       \
    vx1 = vec_mergel (vy0, vy4);                                        \
    vx2 = vec_mergeh (vy1, vy5);                                        \
    vx3 = vec_mergel (vy1, vy5);                                        \
    vx4 = vec_mergeh (vy2, vy6);                                        \
    vx5 = vec_mergel (vy2, vy6);                                        \
    vx6 = vec_mergeh (vy3, vy7);                                        \
    vx7 = vec_mergel (vy3, vy7);                                        \
                                                                        \
    IDCT_HALF                                                           \
                                                                        \
    shift = vec_splat_u16 (6);                                          \
    vx0 = vec_sra (vy0, shift);                                         \
    vx1 = vec_sra (vy1, shift);                                         \
    vx2 = vec_sra (vy2, shift);                                         \
    vx3 = vec_sra (vy3, shift);                                         \
    vx4 = vec_sra (vy4, shift);                                         \
    vx5 = vec_sra (vy5, shift);                                         \
    vx6 = vec_sra (vy6, shift);                                         \
    vx7 = vec_sra (vy7, shift);

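/* Structure note (mine): the three blocks of eight vec_mergeh/vec_mergel
 * pairs between the two IDCT_HALF passes implement the 8x8 16-bit
 * transpose (three interleaving stages for eight elements), so the second
 * pass operates on rows.  The bias is folded into vx0 before that pass so
 * that the final arithmetic shift right by 6 rounds instead of
 * truncating. */
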
static vector_s16_t constants[5] ATTR_ALIGN(16) = {
    (vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
    (vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
    (vector_s16_t)(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
    (vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
    (vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
};

void idct_block_copy_altivec (vector_s16_t * block, unsigned char * dest,
                              int stride)
{
    vector_u8_t tmp;

    IDCT

#define COPY(dest,src)                                          \
    tmp = vec_packsu (src, src);                                \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);       \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);

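/* COPY saturates one row of eight 16-bit results to unsigned bytes and
 * writes them with two 32-bit element stores.  vec_packsu (src, src)
 * duplicates the row in both halves of tmp, and vec_ste stores whichever
 * word element matches the target address alignment, so the pair of
 * stores lands on the right pixels provided dest is 8-byte aligned, as
 * MPEG block destinations normally are. */
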
    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)
}

void idct_block_add_altivec (vector_s16_t * block, unsigned char * dest,
                             int stride)
{
    vector_u8_t tmp;
    vector_s16_t tmp2, tmp3;
    vector_u8_t perm0;
    vector_u8_t perm1;
    vector_u8_t p0, p1, p;

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                          \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                    \
    tmp = vec_ld (0, dest);                                         \
    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm);   \
    tmp3 = vec_adds (tmp2, src);                                    \
    tmp = vec_packsu (tmp3, tmp3);                                  \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest);           \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);

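/* How ADD widens the pixels (my reading): perm0/perm1 merge an all-0xFF
 * vector with the lvsl alignment pattern, so every other permute index is
 * 0xFF.  vec_perm only honours the low five bits of each index, and 0x1F
 * selects the last byte of the second operand, which is zero here: each
 * destination byte thus becomes a zero-extended big-endian 16-bit value,
 * ready to be added to the IDCT output with vec_adds. */
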
    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)
}

#endif /* __BUILD_ALTIVEC_ASM__ || CAN_COMPILE_C_ALTIVEC */

#ifndef __BUILD_ALTIVEC_ASM__

/*****************************************************************************
 * Functions exported as capabilities. They are declared as static so that
 * we don't pollute the namespace too much.
 *****************************************************************************/
static void idct_getfunctions( function_list_t * p_function_list )
{
#define F p_function_list->functions.idct
    F.pf_idct_init = InitIDCT;
    F.pf_norm_scan = NormScan;
    /* FIXME : it would be a nice idea to use sparse IDCT functions */
    F.pf_sparse_idct_add = idct_block_add_altivec;
    F.pf_sparse_idct_copy = idct_block_copy_altivec;
    F.pf_idct_add = idct_block_add_altivec;
    F.pf_idct_copy = idct_block_copy_altivec;
#undef F
}

#endif /* __BUILD_ALTIVEC_ASM__ */