/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"

#include "dsputil_ppc.h"
#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"
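
/* The motion-compensation primitives below are generated twice from
 * h264_template_altivec.c: once with the "put" store operation and once
 * with the "avg" operation, selected through OP_U8_ALTIVEC and the
 * PREFIX_* name macros. */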
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec         avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num             altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec   avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num       altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec   avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num       altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec  avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num      altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
    DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}
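
/* H264_MC expands to the 16 quarter-pel motion compensation functions
 * (mc00 .. mc33, one per fractional position). The half-pel planes come
 * from the 6-tap h/v/hv lowpass filters; the remaining quarter-pel
 * positions are formed by averaging two such planes with the pixels_l2
 * routines below. */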

/* this code assumes stride % 16 == 0 */
void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
    DECLARE_ALIGNED_16(signed int, ABCD[4]) =
                        {((8 - x) * (8 - y)),
                         ((    x) * (8 - y)),
                         ((8 - x) * (    y)),
                         ((    x) * (    y))};
    register int i;
    vec_u8 fperm;
    const vec_s32 vABCD = vec_ld(0, ABCD);
    const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
    const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
    const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
    const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
    LOAD_ZERO;
    /* 28 = (1<<5) - 4: rounding bias of the no-rounding variant of the
     * (A*s0 + B*s1 + C*s2 + D*s3 + bias) >> 6 chroma interpolation */
    const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
    const vec_u16 v6us = vec_splat_u16(6);
    register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
    register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
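
    /* The filter reads 9 consecutive source bytes per line: with a
     * misalignment of at most 7 a single 16-byte load covers them
     * (loadSecond == 0); otherwise a second load is needed, and with a
     * misalignment of exactly 15 (reallyBadAlign) the shifted row is
     * the second vector itself, so no permute is required. */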

    vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
    vec_u8 vsrc0uc, vsrc1uc;
    vec_s16 vsrc0ssH, vsrc1ssH;
    vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
    vec_s16 vsrc2ssH, vsrc3ssH, psum;
    vec_u8 vdst, ppsum, fsum;

    if (((unsigned long)dst) % 16 == 0) {
        fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
                         0x14, 0x15, 0x16, 0x17,
                         0x08, 0x09, 0x0A, 0x0B,
                         0x0C, 0x0D, 0x0E, 0x0F};
    } else {
        fperm = (vec_u8){0x00, 0x01, 0x02, 0x03,
                         0x04, 0x05, 0x06, 0x07,
                         0x18, 0x19, 0x1A, 0x1B,
                         0x1C, 0x1D, 0x1E, 0x1F};
    }

    vsrcAuc = vec_ld(0, src);

    if (loadSecond)
        vsrcBuc = vec_ld(16, src);
    vsrcperm0 = vec_lvsl(0, src);
    vsrcperm1 = vec_lvsl(1, src);

    vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
    if (reallyBadAlign)
        vsrc1uc = vsrcBuc;
    else
        vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);

    vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
    vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);

    if (!loadSecond) {// -> !reallyBadAlign
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride + 0, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
            vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);

            vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
            vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sra(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8)vec_packsu(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    } else {
        vec_u8 vsrcDuc;
        for (i = 0 ; i < h ; i++) {
            vsrcCuc = vec_ld(stride +  0, src);
            vsrcDuc = vec_ld(stride + 16, src);

            vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
            if (reallyBadAlign)
                vsrc3uc = vsrcDuc;
            else
                vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);

            vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc2uc);
            vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc3uc);

            psum = vec_mladd(vA, vsrc0ssH, vec_splat_s16(0));
            psum = vec_mladd(vB, vsrc1ssH, psum);
            psum = vec_mladd(vC, vsrc2ssH, psum);
            psum = vec_mladd(vD, vsrc3ssH, psum);
            psum = vec_add(v28ss, psum);
            psum = vec_sr(psum, v6us);

            vdst = vec_ld(0, dst);
            ppsum = (vec_u8)vec_pack(psum, psum);
            fsum = vec_perm(vdst, ppsum, fperm);

            vec_st(fsum, 0, dst);

            vsrc0ssH = vsrc2ssH;
            vsrc1ssH = vsrc3ssH;

            dst += stride;
            src += stride;
        }
    }
}
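
/* AltiVec has no unaligned memory access: an unaligned load is done as
 * two aligned vec_ld()s combined with vec_perm() through a vec_lvsl()
 * control vector, and an unaligned store as a read-modify-write of the
 * two aligned vectors covering the destination (the edges/vec_lvsr()
 * sequence below). */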
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                    const uint8_t * src2, int dst_stride,
                                    int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {
        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0 , dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
*/

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)

/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */
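
/* The 4x4 transform is separable: VEC_1D_DCT is applied to the rows,
 * the block is transposed with VEC_TRANSPOSE_4, and the same 1D pass
 * then operates on the columns. */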

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)              \
    vdst_orig = vec_ld(0, dst);                       \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);  \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);   \
    va = vec_add(va, vdst_ss);                        \
    va_u8 = vec_packsu(va, zero_s16v);                \
    va_u32 = vec_splat((vec_u32)va_u8, 0);            \
    vec_ste(va_u32, element, (uint32_t*)dst);
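
/* Adds the inverse-transformed 4x4 residual to the prediction in dst:
 * each row is shifted down by 6 (the transform gain, with the rounding
 * bias added to block[0] below), clamped to [0,255] by vec_packsu()
 * and written back four bytes at a time through vec_ste(). */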
static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                       \
    vec_u8 hv = vec_ld( 0, dest );                             \
    vec_u8 lv = vec_ld( 7, dest );                             \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );      \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                   \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);       \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);      \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);          \
    vec_u8 edgehv;                                             \
    /* unaligned store */                                      \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );  \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );       \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, dest );                                     \
    hv    = vec_ld( 0, dest );                                 \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, dest );                                     \
}
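
/* The 8x8 add writes 8 result bytes per row at arbitrary alignment:
 * ALTIVEC_STORE_SUM_CLIP merges them into the two aligned vectors
 * covering dest, with the sel mask choosing which destination bytes
 * are replaced and which neighbours stay untouched. */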
void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
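
/* DC-only blocks add one value to every pixel. Since the vector ops are
 * unsigned saturating, a possibly negative DC is split into dcplus
 * (max(dc, 0)), which is added, and dcminus (max(-dc, 0)), which is
 * subtracted; one of the two is always zero. */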
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED_16(int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec   (dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
    int i;
    for(i=16; i<16+8; i++){
        if(nnzc[ scan8[i] ])
            ff_h264_idct_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
        else if(block[i*16])
            h264_idct_dc_add_altivec(dest[(i&4)>>2] + block_offset[i], block + i*16, stride);
    }
}
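
/* Deblocking: the horizontal-edge luma filter reuses the vertical-edge
 * code by transposing a 16x6 pixel neighbourhood into registers
 * (readAndTranspose16x6), filtering, and transposing the four modified
 * rows back with transpose4x16()/write16x4(). */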

#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                      \
    register vec_u8 r5;                      \
    register vec_u8 r6;                      \
    register vec_u8 r7;                      \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED_16(unsigned char, result[64]);
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** \brief performs a 6x16 transpose of the data in src, leaving the six
    result rows in r8..r13
    \todo FIXME: see if we can't spare some vec_lvsl() by factorizing
    them out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);   \
    register vec_u8 r1  = unaligned_load(   src_stride, src);   \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);   \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);   \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);   \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);   \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);   \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);   \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);   \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);   \
                                                                \
    r8  = unaligned_load( 8*src_stride, src);                   \
    r9  = unaligned_load( 9*src_stride, src);                   \
    r10 = unaligned_load(10*src_stride, src);                   \
    r11 = unaligned_load(11*src_stride, src);                   \
    r12 = unaligned_load(12*src_stride, src);                   \
    r13 = unaligned_load(13*src_stride, src);                   \
                                                                \
    /*Merge first pairs*/                                       \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                        \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                        \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                        \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                        \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                        \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                        \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                        \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                        \
                                                                \
    /*Merge second pairs*/                                      \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/             \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/             \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/             \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/             \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/             \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/             \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/             \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/             \
                                                                \
    /*Third merge*/                                             \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/    \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/    \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/    \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/    \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/    \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/    \
    /* Don't need to compute 3 and 7*/                          \
                                                                \
    /*Final merge*/                                             \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                   \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                   \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                   \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                   \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                   \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                   \
    /* Don't need to compute 14 and 15*/                        \
}
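
/* vec_subs() on unsigned bytes saturates at zero, so vec_subs(x,y) is
 * max(x-y, 0); OR-ing the two one-sided differences gives |x-y| without
 * widening to 16 bits. */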
// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}
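
/* vec_avg() rounds up: avg(a,b) = (a+b+1)>>1. Subtracting the bit
 * (a^b)&1 turns that into the truncating (a+b)>>1 required here, which
 * is what the temp/ones correction below implements. */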
// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 uncliped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                                 \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                      \
    register vec_u8 q1minus;                                                                      \
    register vec_u8 p0minus;                                                                      \
    register vec_u8 stage1;                                                                       \
    register vec_u8 stage2;                                                                       \
    register vec_u8 vec160;                                                                       \
    register vec_u8 delta;                                                                        \
    register vec_u8 deltaneg;                                                                     \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}
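
/* The four per-edge tc0 bytes are broadcast across the 16 lanes: two
 * vec_mergeh() passes replicate each byte four times, so lane i uses
 * tc0[i/4]. A negative tc0 disables filtering for that 4-pixel group
 * through the cmpgt mask. */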
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED_16(unsigned char, temp[16]);                                             \
    register vec_u8 alphavec;                                                                \
    register vec_u8 betavec;                                                                 \
    register vec_u8 mask;                                                                    \
    register vec_u8 p1mask;                                                                  \
    register vec_u8 q1mask;                                                                  \
    register vector signed char tc0vec;                                                      \
    register vec_u8 finaltc0;                                                                \
    register vec_u8 tc0masked;                                                               \
    register vec_u8 newp1;                                                                   \
    register vec_u8 newq1;                                                                   \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    *((int *)temp) = *((int *)tc0);    /* tc0 */                                             \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */               \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);             /* if ( |p2 - p0| < beta) */                 \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, p1mask);       /* tc++ */                                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);             /* if ( |q2 - q0| < beta ) */                \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, q1mask);       /* tc++ */                                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0,  0, pix);
        vec_st(q1,  stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
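
/* The scalar weighting parameters are staged in an aligned int32 array
 * and splatted from 16-bit element positions: on big-endian AltiVec the
 * low half of temp[n] is 16-bit element 2n+1, hence the odd splat
 * indices (1, 3, 5, 7). */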
static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED_16(int32_t, temp[4]);
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED_16(int32_t, temp[4]);
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
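
/* Runtime CPU detection: the DSPContext function pointers are switched
 * to the AltiVec implementations only when has_altivec() reports
 * support, so the same binary still runs on non-AltiVec PowerPC. */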
void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {

    if (has_altivec()) {
        c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
        c->put_no_rnd_h264_chroma_pixels_tab[0] = put_no_rnd_h264_chroma_mc8_altivec;
        c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
        c->h264_idct_add = ff_h264_idct_add_altivec;
        c->h264_idct_add8 = ff_h264_idct_add8_altivec;
        c->h264_idct_add16 = ff_h264_idct_add16_altivec;
        c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
        c->h264_idct_dc_add= h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
        c->h264_idct8_add = ff_h264_idct8_add_altivec;
        c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;

#define dspfunc(PFX, IDX, NUM) \
        c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
        c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
        c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
        c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
        c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
        c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
        c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
        c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
        c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
        c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
        c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
        c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
        c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
        c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
        c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
        c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

        dspfunc(put_h264_qpel, 0, 16);
        dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc

        c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
        c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
        c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
        c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
        c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
        c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
        c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
        c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
        c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
        c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
    }
}