/*
 * Copyright (c) 2001 Michel Lespinasse
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * NOTE: This code is based on GPL code from the libmpeg2 project.  The
 * author, Michel Lespinasse, has given explicit permission to release
 * it under the LGPL as part of FFmpeg.
 */
/*
 * FFmpeg integration by Dieter Shirley
 *
 * This file is a direct copy of the AltiVec IDCT module from the libmpeg2
 * project.  I've deleted all of the libmpeg2-specific code, renamed the
 * functions and re-ordered the function parameters.  The only change to the
 * IDCT function itself was to factor out the partial transposition and to
 * perform a full transpose at the end of the function.
 */

#include <stdlib.h>                                      /* malloc(), free() */

#include "../dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"
#define vector_s16_t vector signed short
#define vector_u16_t vector unsigned short
#define vector_s8_t vector signed char
#define vector_u8_t vector unsigned char
#define vector_s32_t vector signed int
#define vector_u32_t vector unsigned int

#define IDCT_HALF \
    /* 1st stage */ \
    t1 = vec_mradds (a1, vx7, vx1); \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
    t7 = vec_mradds (a2, vx5, vx3); \
    t3 = vec_mradds (ma2, vx3, vx5); \
    \
    /* 2nd stage */ \
    t5 = vec_adds (vx0, vx4); \
    t0 = vec_subs (vx0, vx4); \
    t2 = vec_mradds (a0, vx6, vx2); \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
    t6 = vec_adds (t8, t3); \
    t3 = vec_subs (t8, t3); \
    t8 = vec_subs (t1, t7); \
    t1 = vec_adds (t1, t7); \
    \
    /* 3rd stage */ \
    t7 = vec_adds (t5, t2); \
    t2 = vec_subs (t5, t2); \
    t5 = vec_adds (t0, t4); \
    t0 = vec_subs (t0, t4); \
    t4 = vec_subs (t8, t3); \
    t3 = vec_adds (t8, t3); \
    \
    /* 4th stage */ \
    vy0 = vec_adds (t7, t1); \
    vy7 = vec_subs (t7, t1); \
    vy1 = vec_mradds (c4, t3, t5); \
    vy6 = vec_mradds (mc4, t3, t5); \
    vy2 = vec_mradds (c4, t4, t0); \
    vy5 = vec_mradds (mc4, t4, t0); \
    vy3 = vec_adds (t2, t6); \
    vy4 = vec_subs (t2, t6);
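
/* Editorial note (not from the original author): IDCT_HALF leans entirely on
 * vec_mradds, which maps to the AltiVec vmhraddshs instruction.  As I read
 * the AltiVec documentation, each 16-bit lane does a Q15 fixed-point multiply
 * with rounding, followed by a saturating add.  A scalar sketch of that
 * per-lane behaviour, kept out of the build and offered only as an
 * illustration: */
#if 0
static inline short mradds_model (short a, short b, short c)
{
    /* multiply in Q15, round at bit 14, shift back down, saturating add */
    int r = ((a * b + 0x4000) >> 15) + c;
    if (r >  32767) r =  32767;
    if (r < -32768) r = -32768;
    return (short) r;
}
#endif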

#define IDCT \
    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
    vector_u16_t shift; \
    \
    c4 = vec_splat (constants[0], 0); \
    a0 = vec_splat (constants[0], 1); \
    a1 = vec_splat (constants[0], 2); \
    a2 = vec_splat (constants[0], 3); \
    mc4 = vec_splat (constants[0], 4); \
    ma2 = vec_splat (constants[0], 5); \
    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
    \
    zero = vec_splat_s16 (0); \
    shift = vec_splat_u16 (4); \
    \
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
    \
    IDCT_HALF \
    \
    vx0 = vec_mergeh (vy0, vy4); \
    vx1 = vec_mergel (vy0, vy4); \
    vx2 = vec_mergeh (vy1, vy5); \
    vx3 = vec_mergel (vy1, vy5); \
    vx4 = vec_mergeh (vy2, vy6); \
    vx5 = vec_mergel (vy2, vy6); \
    vx6 = vec_mergeh (vy3, vy7); \
    vx7 = vec_mergel (vy3, vy7); \
    \
    vy0 = vec_mergeh (vx0, vx4); \
    vy1 = vec_mergel (vx0, vx4); \
    vy2 = vec_mergeh (vx1, vx5); \
    vy3 = vec_mergel (vx1, vx5); \
    vy4 = vec_mergeh (vx2, vx6); \
    vy5 = vec_mergel (vx2, vx6); \
    vy6 = vec_mergeh (vx3, vx7); \
    vy7 = vec_mergel (vx3, vx7); \
    \
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
    vx1 = vec_mergel (vy0, vy4); \
    vx2 = vec_mergeh (vy1, vy5); \
    vx3 = vec_mergel (vy1, vy5); \
    vx4 = vec_mergeh (vy2, vy6); \
    vx5 = vec_mergel (vy2, vy6); \
    vx6 = vec_mergeh (vy3, vy7); \
    vx7 = vec_mergel (vy3, vy7); \
    \
    IDCT_HALF \
    \
    shift = vec_splat_u16 (6); \
    vx0 = vec_sra (vy0, shift); \
    vx1 = vec_sra (vy1, shift); \
    vx2 = vec_sra (vy2, shift); \
    vx3 = vec_sra (vy3, shift); \
    vx4 = vec_sra (vy4, shift); \
    vx5 = vec_sra (vy5, shift); \
    vx6 = vec_sra (vy6, shift); \
    vx7 = vec_sra (vy7, shift);
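
/* Editorial summary (a reading of the code above, not original documentation):
 * the IDCT macro appears to run two identical 1-D passes.  The eight input
 * rows are shifted up by 4 and pre-scaled by constants[1..4], IDCT_HALF
 * transforms them, the vec_mergeh/vec_mergel ladder transposes the 8x8 block
 * while folding the rounding bias into the first row, IDCT_HALF runs again on
 * what are now the columns, and the results are shifted right by 6 to drop
 * the remaining fixed-point scale. */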

static const vector_s16_t constants[5] = {
    (vector_s16_t) AVV(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
    (vector_s16_t) AVV(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
    (vector_s16_t) AVV(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
    (vector_s16_t) AVV(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
    (vector_s16_t) AVV(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
};
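
/* Editorial note: the table above is not explained in the original file.  The
 * values look like Q15 fixed-point factors: in constants[0], 23170 is roughly
 * cos(pi/4)*2^15, 13573 tan(pi/8)*2^15, 6518 tan(pi/16)*2^15 and 21895
 * tan(3*pi/16)*2^15, followed by negated copies for mc4/ma2, while the last
 * two shorts seem to supply the rounding bias splatted in IDCT.  The rows
 * constants[1..4] appear to be the per-row pre-scale factors consumed by the
 * vec_mradds calls.  Treat this as an inference, not the author's notes. */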

void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
    void simple_idct_put(uint8_t *dest, int line_size, int16_t *block);
    simple_idct_put(dest, stride, (int16_t*)block);
POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    vector_u8_t tmp;

POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);

    IDCT

#define COPY(dest,src) \
    tmp = vec_packsu (src, src); \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
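
    /* Editorial note: COPY packs the eight 16-bit results to unsigned bytes,
     * duplicated in both halves of the vector, then stores two 32-bit
     * elements with vec_ste.  The duplication is presumably what lets
     * vec_ste pick the right word for destinations that are only 8-byte
     * aligned rather than 16-byte aligned. */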

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}

void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
    void simple_idct_add(uint8_t *dest, int line_size, int16_t *block);
    simple_idct_add(dest, stride, (int16_t*)block);
POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    vector_u8_t tmp;
    vector_s16_t tmp2, tmp3;
    vector_u8_t perm0;
    vector_u8_t perm1;
    vector_u8_t p0, p1, p;

POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);

    IDCT

    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm) \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
    tmp = vec_ld (0, dest); \
    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
    tmp3 = vec_adds (tmp2, src); \
    tmp = vec_packsu (tmp3, tmp3); \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
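
    /* Editorial note: ADD loads the 16-byte line containing dest, widens the
     * eight destination pixels to 16-bit values via vec_perm (perm0/perm1
     * interleave 0xff bytes, which index into the zero vector, with the
     * vec_lvsl offsets for even and odd rows), adds the IDCT output with
     * saturation, then packs and stores 8 bytes exactly as COPY does. */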

    ADD (dest, vx0, perm0)      dest += stride;
    ADD (dest, vx1, perm1)      dest += stride;
    ADD (dest, vx2, perm0)      dest += stride;
    ADD (dest, vx3, perm1)      dest += stride;
    ADD (dest, vx4, perm0)      dest += stride;
    ADD (dest, vx5, perm1)      dest += stride;
    ADD (dest, vx6, perm0)      dest += stride;
    ADD (dest, vx7, perm1)

POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
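
/* Editorial note: a minimal, hypothetical usage sketch, kept out of the build.
 * None of the names below come from the original file; it only assumes a
 * 16-byte aligned block of 64 dequantized coefficients and an 8-byte aligned
 * destination with the row stride in bytes. */
#if 0
static void example_idct (void)
{
    int16_t coeffs[64] __attribute__ ((aligned (16))); /* dequantized 8x8 DCT block */
    uint8_t dest[64]   __attribute__ ((aligned (16))); /* 8x8 output, stride 8 */

    /* ... fill coeffs ... */

    /* overwrite dest with the reconstructed samples */
    idct_put_altivec (dest, 8, (vector_s16_t *) coeffs);

    /* or add the reconstructed error to the pixels already in dest */
    idct_add_altivec (dest, 8, (vector_s16_t *) coeffs);
}
#endif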