/*****************************************************************************
 * mc.c: ppc motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2015 x264 project
 *
 * Authors: Eric Petit <eric.petit@lapsus.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdarg.h>

#include "x264.h"
#include "common/common.h"
#include "common/mc.h"
#include "mc.h"
#include "ppccommon.h"

#if !HIGH_BIT_DEPTH
typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
                         uint8_t *dst, intptr_t i_dst, int i_height );

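/* Scalar form of the H.264 6-tap half-pel filter, coefficients
 * [1, -5, 20, 20, -5, 1]:
 *     out = p[-2] - 5*p[-1] + 20*(p[0] + p[1]) - 5*p[2] + p[3]
 * For a constant row of 10s this evaluates to 320, i.e. 10 after the
 * usual (x+16)>>5 rounding. */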
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
           pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
           pix[ 3*i_pix_next];
}

static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
           pix[ 3];
}

static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

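/* vec_avg() computes the same rounded average as the scalar loop above,
 * ( a + b + 1 ) >> 1, so the w8/w16 variants below are bit-exact with
 * the plain-C w4 version; w4 itself stays scalar because a 4-byte row
 * is too narrow for a 16-byte vector to pay off. */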
static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_STORE8;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        VEC_STORE8( src1v, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        vec_st(src1v, 0, dst);

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    x264_pixel_avg2_w16_altivec( dst,    i_dst, src1,    i_src1, src2,    i_height );
    x264_pixel_avg2_w4_altivec ( dst+16, i_dst, src1+16, i_src1, src2+16, i_height );
}

/* mc_copy: plain c */

#define MC_COPY( name, a )                                \
static void name( uint8_t *dst, intptr_t i_dst,           \
                  uint8_t *src, intptr_t i_src, int i_height ) \
{                                                         \
    int y;                                                \
    for( y = 0; y < i_height; y++ )                       \
    {                                                     \
        memcpy( dst, src, a );                            \
        src += i_src;                                     \
        dst += i_dst;                                     \
    }                                                     \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )

static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
                                      uint8_t *src, intptr_t i_src, int i_height )
{
    vec_u8_t cpyV;
    PREP_LOAD;
    PREP_LOAD_SRC( src );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}

static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
                                              uint8_t *src, intptr_t i_src, int i_height )
{
    for( int y = 0; y < i_height; ++y )
    {
        vec_u8_t cpyV = vec_ld( 0, src );
        vec_st(cpyV, 0, dst);

        src += i_src;
        dst += i_dst;
    }
}

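/* Full-pel/half-pel/quarter-pel luma dispatch.  qpel_idx packs the
 * fractional MV as ((mvy&3)<<2)|(mvx&3); quarter-pel positions
 * (qpel_idx & 5) are generated by averaging the two nearest half-pel
 * planes, selected through x264_hpel_ref0/x264_hpel_ref1, as in the
 * other x264 MC backends. */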
static void mc_luma_altivec( uint8_t *dst,    intptr_t i_dst_stride,
                             uint8_t *src[4], intptr_t i_src_stride,
                             int mvx, int mvy,
                             int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);

        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
    {
        switch( i_width )
        {
            case 4:
                x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 8:
                x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 16:
                x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
        }
    }
}

static uint8_t *get_ref_altivec( uint8_t *dst,    intptr_t *i_dst_stride,
                                 uint8_t *src[4], intptr_t i_src_stride,
                                 int mvx, int mvy,
                                 int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 12:
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 20:
                x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

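/* Chroma MC: bilinear interpolation over interleaved (NV12-style) UV
 * data at eighth-pel MV precision.  With d8x = mvx&7 and d8y = mvy&7
 * the four corner weights are
 *     cA = (8-d8x)*(8-d8y)   cB = d8x*(8-d8y)
 *     cC = (8-d8x)*d8y       cD = d8x*d8y
 * and always sum to 64, hence the ( ... + 32 ) >> 6 rounding.  Since U
 * and V are interleaved, the horizontal neighbour lies 2 bytes away. */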
static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                           uint8_t *src, intptr_t i_src_stride,
                           int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx&0x07;
    int d8y = mvy&0x07;

    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

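/* Vector version for 4-wide chroma rows.  The 9-byte load covers the
 * four interleaved UV pairs plus the extra pair needed for the x+1
 * taps; vec_sld by 2 bytes then produces the right-neighbour vector,
 * and perm0v/perm1v de-interleave U and V from the 16-bit results. */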
static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src2v_8, dstuv, dstvv;
    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
    vec_u16_t   shiftv, k32v;

    static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
    static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
    src2v_16 = vec_u8_to_u16( src2v_8 );
    src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src0v_8, src1v_8, src2v_8, src3v_8;
    vec_u8_t    dstuv, dstvv;
    vec_u16_t   src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
    vec_u16_t   src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
    vec_u16_t   shiftv, k32v;

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
    static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);

    VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
    VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
    src3v_8 = vec_sld( src2v_8, src3v_8, 2 );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );

        src3v_8 = vec_sld( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );

        src3v_8 = vec_sld( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                               uint8_t *src, intptr_t i_src_stride,
                               int mvx, int mvy, int i_width, int i_height )
{
    if( i_width == 8 )
        mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else if( i_width == 4 )
        mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else
        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                       mvx, mvy, i_height );
}

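/* HPEL_FILTER_1 evaluates the 6-tap sum a - 5*b + 20*c (with a = t1+t6,
 * b = t2+t5, c = t3+t4) using only adds, subtracts and shifts:
 *     (a-b) - 4*(b-c) + 16*c  ==  a - 5*b + 20*c */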
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */          \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */          \
    t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     \
}

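/* HPEL_FILTER_2 produces the same 6-tap sum pre-divided by 16; it is
 * used for the centre plane, where the second filter pass runs on the
 * 16-bit intermediates of the first pass and a full-scale sum would
 * overflow a 16-bit lane.  A scalar sketch of the identity it relies on
 * (arithmetic shifts standing in for the divisions):
 *
 *     int hpel2( int a, int b, int c )
 *     {
 *         return ((((a - b) >> 2) - b + c) >> 2) + c;  // (a - 5*b + 20*c)/16
 *     }
 */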
#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}

#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = vec_sld( src1v, src6v,  1 );                     \
    src3v = vec_sld( src1v, src6v,  2 );                     \
    src4v = vec_sld( src1v, src6v,  3 );                     \
    src5v = vec_sld( src1v, src6v,  4 );                     \
    src6v = vec_sld( src1v, src6v,  5 );                     \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}

#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}

#define HPEL_FILTER_CENTRAL()                           \
{                                                       \
    temp1v = vec_sld( tempav, tempbv, 12 );             \
    temp2v = vec_sld( tempav, tempbv, 14 );             \
    temp3v = tempbv;                                    \
    temp4v = vec_sld( tempbv, tempcv,  2 );             \
    temp5v = vec_sld( tempbv, tempcv,  4 );             \
    temp6v = vec_sld( tempbv, tempcv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest1v = vec_add( temp1v, thirtytwov );             \
    dest1v = vec_sra( dest1v, sixv );                   \
                                                        \
    temp1v = vec_sld( tempbv, tempcv, 12 );             \
    temp2v = vec_sld( tempbv, tempcv, 14 );             \
    temp3v = tempcv;                                    \
    temp4v = vec_sld( tempcv, tempdv,  2 );             \
    temp5v = vec_sld( tempcv, tempdv,  4 );             \
    temp6v = vec_sld( tempcv, tempdv,  6 );             \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest2v = vec_add( temp1v, thirtytwov );             \
    dest2v = vec_sra( dest2v, sixv );                   \
                                                        \
    destv = vec_packsu( dest1v, dest2v );               \
                                                        \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}

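/* One pass over each row produces all three half-pel planes: dsth from
 * the horizontal filter, dstv from the vertical filter, and dstc from
 * HPEL_FILTER_2 applied horizontally to the 16-bit vertical-filter
 * intermediates.  The tempav..tempev rotation keeps the two previous
 * 16-element blocks of intermediates live so the centre filter can
 * reach two pixels to the left, which is why it stores to dstc[x-16]. */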
void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                               intptr_t i_stride, int i_width, int i_height, int16_t *buf )
{
    vec_u8_t destv;
    vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
    vec_s16_t dest1v, dest2v;
    vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
    vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;

    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE16;
    PREP_STORE16_DST( dsth );
    LOAD_ZERO;

    vec_u16_t twov, fourv, fivev, sixv;
    vec_s16_t sixteenv, thirtytwov;
    vec_u16_u temp_u;

    temp_u.s[0] = 2;
    twov = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 4;
    fourv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 5;
    fivev = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 6;
    sixv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 16;
    sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 32;
    thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );

    for( int y = 0; y < i_height; y++ )
    {
        int x = 0;

        /* horizontal_filter */
        HPEL_FILTER_HORIZONTAL();

        /* vertical_filter */
        HPEL_FILTER_VERTICAL();

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = vec_splat( temp1v, 0 ); /* first only */
        tempdv = temp1v;
        tempev = temp4v;

        for( x = 16; x < i_width; x += 16 )
        {
            /* horizontal_filter */
            HPEL_FILTER_HORIZONTAL();

            /* vertical_filter */
            HPEL_FILTER_VERTICAL();

            /* central_filter */
            tempav = tempcv;
            tempbv = tempdv;
            tempcv = tempev;
            tempdv = temp1v;
            tempev = temp4v;

            HPEL_FILTER_CENTRAL();
        }

        /* Partial vertical filter */
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );

        temp1v = vec_u8_to_s16_h( src1v );
        temp2v = vec_u8_to_s16_h( src2v );
        temp3v = vec_u8_to_s16_h( src3v );
        temp4v = vec_u8_to_s16_h( src4v );
        temp5v = vec_u8_to_s16_h( src5v );
        temp6v = vec_u8_to_s16_h( src6v );

        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = tempev;
        tempdv = temp1v;
        /* tempev is not used */

        HPEL_FILTER_CENTRAL();
    }
}

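/* Lowres pyramid for lookahead: four half-resolution planes (dst0 plus
 * the h/v/c half-pel offsets), each output pixel a rounded average of
 * a 2x2 neighbourhood built from two vec_avg passes.
 * inverse_bridge_shuffle selects the even bytes of the averaged pair;
 * the vec_pack on u16 halves selects the odd bytes. */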
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                                            intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    int w = width >> 4;
    int end = (width & 15);
    vec_u8_t src0v, src1v, src2v;
    vec_u8_t lv, hv, src1p1v;
    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );

    for( int y = 0; y < height; y++ )
    {
        int x;
        uint8_t *src1 = src0+src_stride;
        uint8_t *src2 = src1+src_stride;

        src0v = vec_ld(0, src0);
        src1v = vec_ld(0, src1);
        src2v = vec_ld(0, src2);

        avg0v = vec_avg(src0v, src1v);
        avg1v = vec_avg(src1v, src2v);

        for( x = 0; x < w; x++ )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+2), src0);
            src1p1v = vec_ld(16*(x*2+2), src1);
            avghp1v = vec_avg(lv, src1p1v);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);

            avg0v = avghp1v;

            hv = vec_ld(16*(x*2+1), src2);
            avghv = vec_avg(src1v, hv);

            hv = vec_ld(16*(x*2+2), src2);
            avghp1v = vec_avg(src1p1v, hv);

            avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);

            avg1v = avghp1v;
        }
        if( end )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+1), src2);
            avghp1v = vec_avg(src1v, lv);

            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);

            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);

            lv = vec_sld(lv, lv, 8);
            hv = vec_sld(hv, hv, 8);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
        }

        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

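/* Explicit weighted prediction:
 *     dst = clip( ((src * scale + (1 << (denom-1))) >> denom) + offset )
 * with the rounding and shift skipped when denom == 0; vec_packsu
 * provides the final clip to [0,255]. */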
static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
}

static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
}

static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
}

static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weight_lv, weight_hv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
}

static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD_SRC( src );
    vec_u8_t src_1v, src_2v, src_3v;
    vec_s16_t weight_lv, weight_hv, weight_3v;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            src_1v = vec_ld( 0,  src );
            src_2v = vec_ld( 16, src );
            src_3v = vec_ld( 19, src );
            src_1v = vec_perm( src_1v, src_2v, _src_ );
            src_3v = vec_perm( src_2v, src_3v, _src_ );
            weight_hv = vec_u8_to_s16_h( src_1v );
            weight_lv = vec_u8_to_s16_l( src_1v );
            weight_3v = vec_u8_to_s16_h( src_3v );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_3v = vec_mladd( weight_3v, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );
            weight_3v = vec_add( weight_3v, offsetv );

            src_1v = vec_packsu( weight_hv, weight_lv );
            src_3v = vec_packsu( weight_3v, zero_s16v );
            vec_st( src_1v, 0, dst );
            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            src_1v = vec_ld( 0,  src );
            src_2v = vec_ld( 16, src );
            src_3v = vec_ld( 19, src );
            src_1v = vec_perm( src_1v, src_2v, _src_ );
            src_3v = vec_perm( src_2v, src_3v, _src_ );
            weight_hv = vec_u8_to_s16_h( src_1v );
            weight_lv = vec_u8_to_s16_l( src_1v );
            weight_3v = vec_u8_to_s16_h( src_3v );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
            weight_3v = vec_mladd( weight_3v, scalev, offsetv );

            src_1v = vec_packsu( weight_hv, weight_lv );
            src_3v = vec_packsu( weight_3v, zero_s16v );
            vec_st( src_1v, 0, dst );
            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
        }
    }
}

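/* Weight table indexed by width>>2 as called from mc_luma/get_ref above
 * (widths 2, 4, 8, 12, 16, 20); width 12 shares the 16-wide kernel. */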
static weight_fn_t x264_mc_weight_wtab_altivec[6] =
{
    mc_weight_w2_altivec,
    mc_weight_w4_altivec,
    mc_weight_w8_altivec,
    mc_weight_w16_altivec,
    mc_weight_w16_altivec,
    mc_weight_w20_altivec,
};

#endif // !HIGH_BIT_DEPTH

void x264_mc_altivec_init( x264_mc_functions_t *pf )
{
#if !HIGH_BIT_DEPTH
    pf->mc_luma   = mc_luma_altivec;
    pf->get_ref   = get_ref_altivec;
    pf->mc_chroma = mc_chroma_altivec;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;

    pf->hpel_filter = x264_hpel_filter_altivec;
    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;

    pf->weight = x264_mc_weight_wtab_altivec;
#endif // !HIGH_BIT_DEPTH
}