/*
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "gcc_fixes.h"

#include "dsputil_altivec.h"
#include "util_altivec.h"
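
// Element-wise product in place: dst[i] *= src[i] for len floats. The
// loads and stores use no realignment permutes, so both buffers are
// assumed 16-byte aligned and len a multiple of 8.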
static void vector_fmul_altivec(float *dst, const float *src, int len)
{
    int i;
    vector float d0, d1, s, zero = (vector float)vec_splat_u32(0);
    for(i=0; i<len-7; i+=8) {
        d0 = vec_ld(0, dst+i);
        s  = vec_ld(0, src+i);
        d1 = vec_ld(16, dst+i);
        d0 = vec_madd(d0, s, zero);
        d1 = vec_madd(d1, vec_ld(16,src+i), zero);
        vec_st(d0, 0, dst+i);
        vec_st(d1, 16, dst+i);
    }
}
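
// dst[i] = src0[i] * src1[len-1-i]: src1 is walked backwards, and each
// loaded vector is reversed in-register by the mergeh/mergel shuffle
// sequence before the multiply.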
static void vector_fmul_reverse_altivec(float *dst, const float *src0,
                                        const float *src1, int len)
{
    int i;
    vector float d, s0, s1, h0, l0,
                 s2, s3, zero = (vector float)vec_splat_u32(0);
    src1 += len-4;
    for(i=0; i<len-7; i+=8) {
        s1 = vec_ld(0, src1-i);              // [a,b,c,d]
        s0 = vec_ld(0, src0+i);
        l0 = vec_mergel(s1, s1);             // [c,c,d,d]
        s3 = vec_ld(-16, src1-i);
        h0 = vec_mergeh(s1, s1);             // [a,a,b,b]
        s2 = vec_ld(16, src0+i);
        s1 = vec_mergeh(vec_mergel(l0,h0),   // [d,b,d,b]
                        vec_mergeh(l0,h0));  // [c,a,c,a]
                                             // -> [d,c,b,a]
        l0 = vec_mergel(s3, s3);
        d = vec_madd(s0, s1, zero);
        h0 = vec_mergeh(s3, s3);
        vec_st(d, 0, dst+i);
        s3 = vec_mergeh(vec_mergel(l0,h0),
                        vec_mergeh(l0,h0));
        d = vec_madd(s2, s3, zero);
        vec_st(d, 16, dst+i);
    }
}
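
// dst[i*step] = src0[i]*src1[i] + src2[i]. Only the common contiguous
// case (step == 1, src3 == 0) is vectorized; the step == 2 path below is
// disabled (#if 0) because it still produces wrong results, and every
// other case takes the C fallback.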
static void vector_fmul_add_add_altivec(float *dst, const float *src0,
                                        const float *src1, const float *src2,
                                        int src3, int len, int step)
{
    int i;
    vector float d, s0, s1, s2, t0, t1, edges;
    vector unsigned char align = vec_lvsr(0,dst),
                         mask = vec_lvsl(0, dst);

#if 0 //FIXME: there is still something wrong
    if (step == 2) {
        int y;
        vector float d0, d1, s3, t2;
        vector unsigned int sel =
                vec_mergeh(vec_splat_u32(-1), vec_splat_u32(0));
        t1 = vec_ld(16, dst);
        for (i=0,y=0; i<len-3; i+=4,y+=8) {
            s0 = vec_ld(0, src0+i);
            s1 = vec_ld(0, src1+i);
            s2 = vec_ld(0, src2+i);

//          t0 = vec_ld(0, dst+y);  //[x x x|a]
//          t1 = vec_ld(16, dst+y); //[b c d|e]
            t2 = vec_ld(31, dst+y); //[f g h|x]

            d = vec_madd(s0, s1, s2); // [A B C D]

            d0 = vec_perm(t0, t1, mask);             // [a b c d]
            d0 = vec_sel(vec_mergeh(d, d), d0, sel); // [A b B d]
            edges = vec_perm(t1, t0, mask);
            t0 = vec_perm(edges, d0, align);         // [x x x|A]
            t1 = vec_perm(d0, edges, align);         // [b B d|e]
            vec_stl(t0, 0, dst+y);

            d1 = vec_perm(t1, t2, mask);             // [e f g h]
            d1 = vec_sel(vec_mergel(d, d), d1, sel); // [C f D h]
            edges = vec_perm(t2, t1, mask);
            t1 = vec_perm(edges, d1, align);         // [b B d|C]
            t2 = vec_perm(d1, edges, align);         // [f D h|x]
            vec_stl(t1, 16, dst+y);
            t1 = d1;
            vec_stl(t2, 31, dst+y);
            t0 = t2;
        }
    } else
#endif
    if (step == 1 && src3 == 0)
        for (i=0; i<len-3; i+=4) {
            t0 = vec_ld(0, dst+i);
            t1 = vec_ld(15, dst+i);
            s0 = vec_ld(0, src0+i);
            s1 = vec_ld(0, src1+i);
            s2 = vec_ld(0, src2+i);
            edges = vec_perm(t1, t0, mask);
            d = vec_madd(s0, s1, s2);
            t1 = vec_perm(d, edges, align);
            t0 = vec_perm(edges, d, align);
            vec_st(t1, 15, dst+i);
            vec_st(t0, 0, dst+i);
        }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
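
// Windowing as used for (i)MDCT overlap-add: for i in [-len,0) and the
// mirrored index j, dst[i] = s0[i]*win[j] - s1[j]*win[i] + bias and
// dst[j] = s0[i]*win[i] + s1[j]*win[j] + bias, matching the scalar
// reference in dsputil.c.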
static void vector_fmul_window_altivec(float *dst, const float *src0, const float *src1, const float *win, float add_bias, int len)
{
    union { vector float v; float s[4]; } vadd;
    vector float vadd_bias, zero, t0, t1, s0, s1, wi, wj;
    const vector unsigned char reverse = vcprm(3,2,1,0);
    int i, j;

    dst  += len;
    win  += len;
    src0 += len;

    vadd.s[0] = add_bias;
    vadd_bias = vec_splat(vadd.v, 0);
    zero = (vector float)vec_splat_u32(0);

    for(i=-len*4, j=len*4-16; i<0; i+=16, j-=16) {
        s0 = vec_ld(i, src0);
        s1 = vec_ld(j, src1);
        wi = vec_ld(i, win);
        wj = vec_ld(j, win);
        s1 = vec_perm(s1, s1, reverse);
        wj = vec_perm(wj, wj, reverse);
        t0 = vec_madd(s0, wj, vadd_bias);
        t0 = vec_nmsub(s1, wi, t0);
        t1 = vec_madd(s0, wi, vadd_bias);
        t1 = vec_madd(s1, wj, t1);
        t1 = vec_perm(t1, t1, reverse);
        vec_st(t0, i, dst);
        vec_st(t1, j, dst);
    }
}
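
// dst[i] = (float)src[i] * mul, int32 -> float. The scalar multiplier is
// broadcast to all four lanes through a union, since AltiVec has no
// splat-from-scalar-register instruction.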
static void int32_to_float_fmul_scalar_altivec(float *dst, const int *src, float mul, int len)
{
    union { vector float v; float s[4]; } mul_u;
    int i;
    vector float src1, src2, dst1, dst2, mul_v, zero;

    zero = (vector float)vec_splat_u32(0);
    mul_u.s[0] = mul;
    mul_v = vec_splat(mul_u.v, 0);

    for(i=0; i<len; i+=8) {
        src1 = vec_ctf(vec_ld(0, src+i), 0);
        src2 = vec_ctf(vec_ld(16, src+i), 0);
        dst1 = vec_madd(src1, mul_v, zero);
        dst2 = vec_madd(src2, mul_v, zero);
        vec_st(dst1, 0, dst+i);
        vec_st(dst2, 16, dst+i);
    }
}
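
// Convert 8 floats to 8 saturated int16s: two truncating vec_cts
// conversions packed down with vec_packs.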
static vector signed short
float_to_int16_one_altivec(const float *src)
{
    vector float s0 = vec_ld(0, src);
    vector float s1 = vec_ld(16, src);
    vector signed int t0 = vec_cts(s0, 0);
    vector signed int t1 = vec_cts(s1, 0);
    return vec_packs(t0, t1);
}
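
// Bulk float->int16: the unaligned-dst branch reads the surrounding
// destination words and merges the converted samples in with vec_perm
// so that no bytes outside the target range are clobbered.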
static void float_to_int16_altivec(int16_t *dst, const float *src, long len)
{
    int i;
    vector signed short d0, d1, d;
    vector unsigned char align;
    if(((long)dst)&15) { //FIXME
        for(i=0; i<len-7; i+=8) {
            d0 = vec_ld(0, dst+i);
            d  = float_to_int16_one_altivec(src+i);
            d1 = vec_ld(15, dst+i);
            d1 = vec_perm(d1, d0, vec_lvsl(0, dst+i));
            align = vec_lvsr(0, dst+i);
            d0 = vec_perm(d1, d, align);
            d1 = vec_perm(d, d1, align);
            vec_st(d0, 0, dst+i);
            vec_st(d1, 15, dst+i);
        }
    } else {
        for(i=0; i<len-7; i+=8) {
            d = float_to_int16_one_altivec(src+i);
            vec_st(d, 0, dst+i);
        }
    }
}
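
// Interleaved float->int16: mono reuses float_to_int16_altivec, stereo
// interleaves two converted vectors with mergeh/mergel, and any other
// channel count converts each channel into a temporary buffer and
// interleaves it with a scalar loop.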
static void
float_to_int16_interleave_altivec(int16_t *dst, const float **src,
                                  long len, int channels)
{
    int i;
    vector signed short d0, d1, d2, c0, c1, t0, t1;
    vector unsigned char align;
    if(channels == 1)
        float_to_int16_altivec(dst, src[0], len);
    else if(channels == 2) {
        if(((long)dst)&15)
            for(i=0; i<len-7; i+=8) {
                d0 = vec_ld(0, dst + i);
                t0 = float_to_int16_one_altivec(src[0] + i);
                d1 = vec_ld(31, dst + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                c0 = vec_mergeh(t0, t1);
                c1 = vec_mergel(t0, t1);
                d2 = vec_perm(d1, d0, vec_lvsl(0, dst + i));
                align = vec_lvsr(0, dst + i);
                d0 = vec_perm(d2, c0, align);
                d1 = vec_perm(c0, c1, align);
                vec_st(d0, 0, dst + i);
                d0 = vec_perm(c1, d2, align);
                vec_st(d1, 15, dst + i);
                vec_st(d0, 31, dst + i);
                dst += 8;
            }
        else
            for(i=0; i<len-7; i+=8) {
                t0 = float_to_int16_one_altivec(src[0] + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                d0 = vec_mergeh(t0, t1);
                d1 = vec_mergel(t0, t1);
                vec_st(d0, 0, dst + i);
                vec_st(d1, 16, dst + i);
                dst += 8;
            }
    } else {
        DECLARE_ALIGNED(16, int16_t, tmp[len]);
        int c, j;
        for (c = 0; c < channels; c++) {
            float_to_int16_altivec(tmp, src[c], len);
            for (i = 0, j = c; i < len; i++, j += channels)
                dst[j] = tmp[i];
        }
    }
}
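
// Hook the AltiVec implementations into the DSPContext. The windowing and
// float->int16 routines are not bit-exact, so they are only installed
// when CODEC_FLAG_BITEXACT is unset.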
void float_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->vector_fmul = vector_fmul_altivec;
    c->vector_fmul_reverse = vector_fmul_reverse_altivec;
    c->vector_fmul_add_add = vector_fmul_add_add_altivec;
    c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec;
    if(!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        c->vector_fmul_window = vector_fmul_window_altivec;
        c->float_to_int16 = float_to_int16_altivec;
        c->float_to_int16_interleave = float_to_int16_interleave_altivec;
    }
}