/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "dsputil.h"

#include "gcc_fixes.h"

#include "util_altivec.h"
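
/* The STEP8/STEP4 macros below build the VC-1 inverse-transform multiplications
 * out of shifts and adds.  Written out in scalar form, the 8-point step computes
 *   even part:  t0 = 12*(s0+s4)+rnd   t1 = 12*(s0-s4)+rnd
 *               t2 = 16*s2 +  6*s6    t3 =  6*s2 - 16*s6
 *   odd part:   16*s1 + 15*s3 +  9*s5 +  4*s7
 *               15*s1 -  4*s3 - 16*s5 -  9*s7
 *                9*s1 - 16*s3 +  4*s5 + 15*s7
 *                4*s1 -  9*s3 + 15*s5 - 16*s7
 * and the 4-point step uses the coefficient set {17, 22, 10}. */
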
// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)
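
/* The vertical (column) pass rounds its last four outputs with an extra +1
 * before the >>7, hence the vec_1s additions on s4..s7 below; the first four
 * outputs are shifted without it. */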
#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)
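
/* Scalar equivalent of STEP4, derived from the shifts below:
 *   t0 = 17*(s0+s2) + rnd        t1 = 17*(s0-s2) + rnd
 *   t2 = 22*s1 + 10*s3           t3 = 22*s3 - 10*s1
 *   s0 = t0+t2;  s1 = t1-t3;  s2 = t1+t3;  s3 = t0-t2 */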
/* main steps of 4x4 transform */
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

#define SHIFT_HOR4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3);

#define SHIFT_VERT4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7);

/** Do inverse transform on 8x8 block
*/
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);
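
    /* Two-pass transform: load the eight 16-bit rows, transpose, run the
     * 8-point row pass (round 4, >>3), pack back to 16 bits, transpose again,
     * run the 8-point column pass (round 64, >>7) and store.  vec_64 is built
     * as 4 << 4 because vec_splat_s32() immediates are limited to -16..15. */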

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}

/** Do inverse transform on 8x4 part of block
*/
static void vc1_inv_trans_8x4_altivec(DCTELEM block[64], int n)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
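
    /* The 8-point row pass is run over the whole 8x8 block; only the half
     * selected by n (n == 0: top four rows, otherwise bottom four rows) then
     * gets the 4-point column pass and is stored back. */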

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    if (!n) { // upper half of block
        s0 = vec_unpackh(src0);
        s1 = vec_unpackh(src1);
        s2 = vec_unpackh(src2);
        s3 = vec_unpackh(src3);
        s8 = vec_unpackl(src0);
        s9 = vec_unpackl(src1);
        sA = vec_unpackl(src2);
        sB = vec_unpackl(src3);
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);
        src0 = vec_pack(s0, s8);
        src1 = vec_pack(s1, s9);
        src2 = vec_pack(s2, sA);
        src3 = vec_pack(s3, sB);

        vec_st(src0,  0, block);
        vec_st(src1, 16, block);
        vec_st(src2, 32, block);
        vec_st(src3, 48, block);
    } else { // lower half of block
        s0 = vec_unpackh(src4);
        s1 = vec_unpackh(src5);
        s2 = vec_unpackh(src6);
        s3 = vec_unpackh(src7);
        s8 = vec_unpackl(src4);
        s9 = vec_unpackl(src5);
        sA = vec_unpackl(src6);
        sB = vec_unpackl(src7);
        STEP4(s0, s1, s2, s3, vec_64);
        SHIFT_VERT4(s0, s1, s2, s3);
        STEP4(s8, s9, sA, sB, vec_64);
        SHIFT_VERT4(s8, s9, sA, sB);
        src4 = vec_pack(s0, s8);
        src5 = vec_pack(s1, s9);
        src6 = vec_pack(s2, sA);
        src7 = vec_pack(s3, sB);

        vec_st(src4, 64, block);
        vec_st(src5, 80, block);
        vec_st(src6, 96, block);
        vec_st(src7,112, block);
    }
}

void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
}