/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
20
21 #include "config.h"
22
23 #include <stdint.h>
24 #include <string.h>
25
26 #include "libavutil/attributes.h"
27 #include "libavutil/cpu.h"
28 #include "libavutil/intreadwrite.h"
29 #include "libavutil/mem.h"
30 #include "libavutil/ppc/cpu.h"
31 #include "libavutil/ppc/types_altivec.h"
32 #include "libavutil/ppc/util_altivec.h"
33
34 #include "libavcodec/h264.h"
35 #include "libavcodec/h264dsp.h"
36
37 #if HAVE_ALTIVEC
38
39 /****************************************************************************
40  * IDCT transform:
41  ****************************************************************************/
42
#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)               \
    /* 1st stage */                                               \
    vz0 = vec_add(vb0,vb2);       /* temp[0] = Y[0] + Y[2] */     \
    vz1 = vec_sub(vb0,vb2);       /* temp[1] = Y[0] - Y[2] */     \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                          \
    vz2 = vec_sub(vz2,vb3);       /* temp[2] = Y[1].1/2 - Y[3] */ \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                          \
    vz3 = vec_add(vb1,vz3);       /* temp[3] = Y[1] + Y[3].1/2 */ \
    /* 2nd stage: output */                                       \
    va0 = vec_add(vz0,vz3);       /* x[0] = temp[0] + temp[3] */  \
    va1 = vec_add(vz1,vz2);       /* x[1] = temp[1] + temp[2] */  \
    va2 = vec_sub(vz1,vz2);       /* x[2] = temp[1] - temp[2] */  \
    va3 = vec_sub(vz0,vz3)        /* x[3] = temp[0] - temp[3] */

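/* For reference, a scalar sketch of the butterfly above (illustration only,
 * not part of the original code): with Y[0..3] one 4-point input,
 *
 *     z0 = Y[0] + Y[2];          z1 = Y[0] - Y[2];
 *     z2 = (Y[1] >> 1) - Y[3];   z3 = Y[1] + (Y[3] >> 1);
 *     X[0] = z0 + z3;  X[1] = z1 + z2;
 *     X[2] = z1 - z2;  X[3] = z0 - z3;
 *
 * This is the H.264 4x4 integer inverse transform; the macro evaluates it
 * element-wise on 16-bit lanes, i.e. eight independent 4-point transforms
 * per invocation. */
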
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

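/* The transpose above is built purely from vec_mergeh()/vec_mergel():
 * successive interleaving rounds reorder the 4x4 block of 16-bit elements
 * entirely in registers, with no loads or stores. */
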
#if HAVE_BIGENDIAN
#define vdst_load(d)              \
    vdst_orig = vec_ld(0, dst);   \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);
#else
#define vdst_load(d) vdst = vec_vsx_ld(0, dst)
#endif

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_load();                                              \
    vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst);           \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                    \
    vec_ste(va_u32, element, (uint32_t*)dst);

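/* The store path above packs the 16-bit sums to bytes with unsigned
 * saturation, splats the resulting word into every 32-bit lane, and then
 * uses vec_ste() to store a single element.  Because all lanes hold the
 * same word, exactly the four target pixels are written for any
 * word-aligned dst. */
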
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7) {\
    /*        a0  = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4);    \
    /*        a2  = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4);    \
    /*        a4  =           (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);    \
    /*        a6  =           (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);    \
    /*        b0  =         a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v);  \
    /*        b2  =         a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v);  \
    /*        b4  =         a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v);  \
    /*        b6  =         a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v);  \
    /* a1 =  SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /*        a1 =             (SRC(5)-SRC(3)) -  (SRC(7)  +  (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 =  SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /*        a3 =             (SRC(7)+SRC(1)) -  (SRC(3)  +  (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 =  SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /*        a5 =             (SRC(7)-SRC(1)) +   SRC(5) +   (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /*        a7 =                SRC(5)+SRC(3) +  SRC(1) +   (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /*        b1 =                  (a7>>2)  +  a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /*        b3 =          a3 +        (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /*        b5 =                  (a3>>2)  -   a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /*        b7 =           a7 -        (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0,    b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1,    b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2,    b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3,    b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4,    b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5,    b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6,    b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7,    b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

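/* The 8-point pass above follows the even/odd decomposition of the H.264
 * 8x8 integer transform: a0/a2/a4/a6 and b0/b2/b4/b6 form the even half,
 * a1/a3/a5/a7 and b1/b3/b5/b7 the odd half, and the DST() lines combine
 * them in the usual even +/- odd butterfly. */
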
#if HAVE_BIGENDIAN
#define GET_2PERM(ldv, stv, d)  \
    ldv = vec_lvsl(0, d);       \
    stv = vec_lvsr(8, d);
#define dstv_load(d)            \
    vec_u8 hv = vec_ld( 0, d ); \
    vec_u8 lv = vec_ld( 7, d);  \
    vec_u8 dstv   = vec_perm( hv, lv, (vec_u8)perm_ldv );
#define dest_unaligned_store(d)                                \
    vec_u8 edgehv;                                             \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );  \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );       \
    lv    = vec_sel( lv, bodyv, edgelv );                      \
    vec_st( lv, 7, d );                                        \
    hv    = vec_ld( 0, d );                                    \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );              \
    hv    = vec_sel( hv, bodyv, edgehv );                      \
    vec_st( hv, 0, d );
#else

#define GET_2PERM(ldv, stv, d) {}
#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
#define dest_unaligned_store(d)\
    vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3));\
    vec_vsx_st(dst8, 0, d)
#endif /* HAVE_BIGENDIAN */

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                               \
    dstv_load(dest);                                                   \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                           \
    vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv);               \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);              \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);                  \
    /* unaligned store */                                              \
    dest_unaligned_store(dest);                                        \
}

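/* On big-endian AltiVec the unaligned 8-byte store above is a
 * read-modify-write: the two 16-byte slots covering dest are loaded, the
 * packed result is rotated into place with perm_stv, and vec_sel() with
 * the sel-derived edge masks replaces only the eight target bytes before
 * both slots are stored back. */
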
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv, perm_stv;
    GET_2PERM(perm_ldv, perm_stv, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0,  d1,  d2,  d3,  d4,  d5,  d6, d7 );

    IDCT8_1D_ALTIVEC(d0,  d1,  d2,  d3,  d4,  d5,  d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

#if HAVE_BIGENDIAN
#define DST_LD vec_ld
#else
#define DST_LD vec_vsx_ld
#endif
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    vec_s32 v_dc32;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    v_dc32 = vec_lde(0, &dc);
    dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);

    if (size == 4)
        dc16 = VEC_SLD16(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

#if HAVE_BIGENDIAN
    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);
#endif

    for (i = 0; i < size; i += 4) {
        v0 = DST_LD(0, dst+0*stride);
        v1 = DST_LD(0, dst+1*stride);
        v2 = DST_LD(0, dst+2*stride);
        v3 = DST_LD(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        VEC_ST(v0, 0, dst+0*stride);
        VEC_ST(v1, 0, dst+1*stride);
        VEC_ST(v2, 0, dst+2*stride);
        VEC_ST(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

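/* A note on the dcplus/dcminus pair above: AltiVec has no saturating add
 * of signed shorts to unsigned pixels, so the DC term is split into an
 * unsigned "add" vector (dcplus, zero when dc < 0) and an unsigned
 * "subtract" vector (dcminus, zero when dc > 0); vec_adds()/vec_subs()
 * then apply whichever half is non-zero, clipping to [0,255] for free. */
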
static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15 * 8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15 * 8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}

#define transpose4x16(r0, r1, r2, r3) {      \
    register vec_u8 r4;                      \
    register vec_u8 r5;                      \
    register vec_u8 r6;                      \
    register vec_u8 r7;                      \
                                             \
    r4 = vec_mergeh(r0, r2);  /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2);  /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3);  /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3);  /*1, 3 set 1*/ \
                                             \
    r0 = vec_mergeh(r4, r6);  /*all set 0*/  \
    r1 = vec_mergel(r4, r6);  /*all set 1*/  \
    r2 = vec_mergeh(r5, r7);  /*all set 2*/  \
    r3 = vec_mergel(r5, r7);  /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0, 0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

/** @brief performs a 6x16 transpose of data in src, and stores it to dst
    @todo FIXME: see if we can spare some vec_lvsl() by factorizing them
    out of unaligned_load() */
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);              \
    register vec_u8 r1  = unaligned_load(   src_stride, src);              \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);              \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);              \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);              \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);              \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);              \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);              \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);              \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);              \
                                                                           \
    r8  = unaligned_load( 8*src_stride, src);                              \
    r9  = unaligned_load( 9*src_stride, src);                              \
    r10 = unaligned_load(10*src_stride, src);                              \
    r11 = unaligned_load(11*src_stride, src);                              \
    r12 = unaligned_load(12*src_stride, src);                              \
    r13 = unaligned_load(13*src_stride, src);                              \
                                                                           \
    /*Merge first pairs*/                                                  \
    r0 = vec_mergeh(r0, r8);    /*0, 8*/                                   \
    r1 = vec_mergeh(r1, r9);    /*1, 9*/                                   \
    r2 = vec_mergeh(r2, r10);   /*2,10*/                                   \
    r3 = vec_mergeh(r3, r11);   /*3,11*/                                   \
    r4 = vec_mergeh(r4, r12);   /*4,12*/                                   \
    r5 = vec_mergeh(r5, r13);   /*5,13*/                                   \
    r6 = vec_mergeh(r6, r14);   /*6,14*/                                   \
    r7 = vec_mergeh(r7, r15);   /*7,15*/                                   \
                                                                           \
    /*Merge second pairs*/                                                 \
    r8  = vec_mergeh(r0, r4);   /*0,4, 8,12 set 0*/                        \
    r9  = vec_mergel(r0, r4);   /*0,4, 8,12 set 1*/                        \
    r10 = vec_mergeh(r1, r5);   /*1,5, 9,13 set 0*/                        \
    r11 = vec_mergel(r1, r5);   /*1,5, 9,13 set 1*/                        \
    r12 = vec_mergeh(r2, r6);   /*2,6,10,14 set 0*/                        \
    r13 = vec_mergel(r2, r6);   /*2,6,10,14 set 1*/                        \
    r14 = vec_mergeh(r3, r7);   /*3,7,11,15 set 0*/                        \
    r15 = vec_mergel(r3, r7);   /*3,7,11,15 set 1*/                        \
                                                                           \
    /*Third merge*/                                                        \
    r0 = vec_mergeh(r8,  r12);  /*0,2,4,6,8,10,12,14 set 0*/               \
    r1 = vec_mergel(r8,  r12);  /*0,2,4,6,8,10,12,14 set 1*/               \
    r2 = vec_mergeh(r9,  r13);  /*0,2,4,6,8,10,12,14 set 2*/               \
    r4 = vec_mergeh(r10, r14);  /*1,3,5,7,9,11,13,15 set 0*/               \
    r5 = vec_mergel(r10, r14);  /*1,3,5,7,9,11,13,15 set 1*/               \
    r6 = vec_mergeh(r11, r15);  /*1,3,5,7,9,11,13,15 set 2*/               \
    /* Don't need to compute 3 and 7*/                                     \
                                                                           \
    /*Final merge*/                                                        \
    r8  = vec_mergeh(r0, r4);   /*all set 0*/                              \
    r9  = vec_mergel(r0, r4);   /*all set 1*/                              \
    r10 = vec_mergeh(r1, r5);   /*all set 2*/                              \
    r11 = vec_mergel(r1, r5);   /*all set 3*/                              \
    r12 = vec_mergeh(r2, r6);   /*all set 4*/                              \
    r13 = vec_mergel(r2, r6);   /*all set 5*/                              \
    /* Don't need to compute 14 and 15*/                                   \
                                                                           \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec(register vec_u8 x,
                                     register vec_u8 y,
                                     register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

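/* diff_lt_altivec() relies on unsigned saturation: vec_subs(x, y) is zero
 * wherever x <= y, so OR-ing the two one-sided differences yields |x - y|
 * with no widening or sign handling. */
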
static inline vec_u8 h264_deblock_mask(register vec_u8 p0,
                                       register vec_u8 p1,
                                       register vec_u8 q0,
                                       register vec_u8 q1,
                                       register vec_u8 alpha,
                                       register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2 ^ avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2 + ((p0 + q0 + 1) >> 1)) >> 1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

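/* The xor/and pair above corrects the rounding of the chained vec_avg()
 * calls: vec_avg() rounds up, so subtracting (p2 ^ avg(p0, q0)) & 1 turns
 * avg(p2, avg(p0, q0)) into the truncating
 * (p2 + ((p0 + q0 + 1) >> 1)) >> 1 that the function comment specifies. */
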
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                                           \
                                                                                                  \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));                                 \
                                                                                                  \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                                      \
    register vec_u8 q1minus;                                                                      \
    register vec_u8 p0minus;                                                                      \
    register vec_u8 stage1;                                                                       \
    register vec_u8 stage2;                                                                       \
    register vec_u8 vec160;                                                                       \
    register vec_u8 delta;                                                                        \
    register vec_u8 deltaneg;                                                                     \
                                                                                                  \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                                     \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */                           \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */     \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                                     \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */                           \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                                    \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */  \
    vec160 = vec_ld(0, &A0v);                                                                     \
    deltaneg = vec_subs(vec160, stage2);       /* -d */                                           \
    delta = vec_subs(stage2, vec160);          /* d */                                            \
    deltaneg = vec_min(tc0masked, deltaneg);                                                      \
    delta = vec_min(tc0masked, delta);                                                            \
    p0 = vec_subs(p0, deltaneg);                                                                  \
    q0 = vec_subs(q0, delta);                                                                     \
    p0 = vec_adds(p0, delta);                                                                     \
    q0 = vec_adds(q0, deltaneg);                                                                  \
}

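/* h264_deblock_p0_q0() evaluates the edge delta entirely in unsigned
 * bytes: differences are kept positive via vec_nor()/vec_avg() biasing,
 * the running value is centred on 160 (A0v = 10 << 4), and subtracting
 * that bias in both directions recovers +delta and -delta, each clipped
 * against tc0masked before being applied with saturating adds/subs. */
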
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                            \
    register vec_u8 alphavec;                                                                \
    register vec_u8 betavec;                                                                 \
    register vec_u8 mask;                                                                    \
    register vec_u8 p1mask;                                                                  \
    register vec_u8 q1mask;                                                                  \
    register vector signed char tc0vec;                                                      \
    register vec_u8 finaltc0;                                                                \
    register vec_u8 tc0masked;                                                               \
    register vec_u8 newp1;                                                                   \
    register vec_u8 newq1;                                                                   \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */            \
                                                                                             \
    AV_COPY32(temp, tc0);                                                                    \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */         \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */               \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                             /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, p1mask);                       /* tc++ */                   \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                             /* if ( |q2 - q0| < beta ) */\
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, q1mask);                       /* tc++ */                   \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
    /*end if*/                                                                               \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}

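/* Note: the two vec_mergeh(tc0vec, tc0vec) steps above replicate each of
 * the four tc0 bytes four times, so every pixel of a 4-pixel edge segment
 * is compared and clipped against its own tc0 value. */
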
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

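/* In both filters the (tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0 test
 * exploits the sign bit: the AND is negative only when all four tc0
 * values are negative, i.e. the whole edge is skipped; partially disabled
 * edges still take the vector path and are masked out per segment inside
 * h264_loop_filter_luma_altivec(). */
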
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

static av_always_inline
void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                           int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
#if !HAVE_BIGENDIAN
    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
#endif
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

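/* Scalar equivalent of the weighting loop above (a sketch for reference,
 * mirroring how voffset folds in the rounding term):
 *
 *     block[x] = av_clip_uint8((block[x] * weight
 *                               + (offset << log2_denom)
 *                               + (log2_denom ? 1 << (log2_denom - 1) : 0))
 *                              >> log2_denom);
 */
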
static av_always_inline
void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                             int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
#if !HAVE_BIGENDIAN
    vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
#endif
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst);
        v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst);
        v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc);
        v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

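/* Scalar equivalent of the biweighting loop above (a sketch for
 * reference):
 *
 *     dst[x] = av_clip_uint8((dst[x] * weightd + src[x] * weights
 *                             + (((offset + 1) | 1) << log2_denom))
 *                            >> (log2_denom + 1));
 *
 * Forcing the offset odd matches the rounding of the C reference code. */
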
#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
} \
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)
#endif /* HAVE_ALTIVEC */

av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16      = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = h264_idct8_add_altivec;
        c->h264_idct8_add4      = h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1]   = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
#endif /* HAVE_ALTIVEC */
}