/*****************************************************************************
 * mc.c: ppc motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2016 x264 project
 *
 * Authors: Eric Petit <eric.petit@lapsus.org>
 *          Guillaume Poirier <gpoirier@mplayerhq.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "mc.h"
#include "ppccommon.h"

#if !HIGH_BIT_DEPTH
typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
                         uint8_t *dst, intptr_t i_dst, int i_height );

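/* Scalar 6-tap (1,-5,20,20,-5,1) half-pel filters.  i_pix_next selects the
 * step between taps (1 for a horizontal pass, the stride for a vertical
 * pass); tapfilter1 is the horizontal special case with a step of 1. */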
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
           pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
           pix[ 3*i_pix_next];
}

static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
           pix[ 3];
}

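/* Average two half-pel reference planes with rounding: (a+b+1)>>1.
 * The 4-wide case stays scalar, presumably because 4 bytes per row is
 * too narrow to amortize AltiVec load/permute/store overhead. */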
static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_STORE8;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        VEC_STORE8( src1v, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    vec_u8_t src1v, src2v;
    PREP_LOAD;
    PREP_LOAD_SRC( src1 );
    PREP_LOAD_SRC( src2 );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
        VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
        src1v = vec_avg( src1v, src2v );
        vec_st( src1v, 0, dst );

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}

static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    x264_pixel_avg2_w16_altivec( dst,    i_dst, src1,    i_src1, src2,    i_height );
    x264_pixel_avg2_w4_altivec(  dst+16, i_dst, src1+16, i_src1, src2+16, i_height );
}

/* mc_copy: plain c */

#define MC_COPY( name, a )                                     \
static void name( uint8_t *dst, intptr_t i_dst,                \
                  uint8_t *src, intptr_t i_src, int i_height ) \
{                                                              \
    int y;                                                     \
    for( y = 0; y < i_height; y++ )                            \
    {                                                          \
        memcpy( dst, src, a );                                 \
        src += i_src;                                          \
        dst += i_dst;                                          \
    }                                                          \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )

static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
                                      uint8_t *src, intptr_t i_src, int i_height )
{
    vec_u8_t cpyV;
    PREP_LOAD;
    PREP_LOAD_SRC( src );

    for( int y = 0; y < i_height; y++ )
    {
        VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
        vec_st( cpyV, 0, dst );

        src += i_src;
        dst += i_dst;
    }
}

static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
                                              uint8_t *src, intptr_t i_src, int i_height )
{
    for( int y = 0; y < i_height; ++y )
    {
        vec_u8_t cpyV = vec_ld( 0, src );
        vec_st( cpyV, 0, dst );

        src += i_src;
        dst += i_dst;
    }
}

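/* Luma motion compensation with quarter-pel precision.  qpel_idx packs the
 * fractional MV bits ((mvy&3)<<2 | (mvx&3)) into one of 16 subpel positions;
 * the x264_hpel_ref0/ref1 tables name the two precomputed half-pel planes
 * whose average yields that position.  When qpel_idx & 5 is zero the
 * position lies on a single plane and a plain copy suffices. */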
static void mc_luma_altivec( uint8_t *dst,    intptr_t i_dst_stride,
                             uint8_t *src[4], intptr_t i_src_stride,
                             int mvx, int mvy,
                             int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);

        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
    }
    else if( weight->weightfn )
        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
    else
    {
        switch( i_width )
        {
            case 4:
                x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 8:
                x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
            case 16:
                x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
                break;
        }
    }
}

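/* get_ref: same subpel selection as mc_luma, but when no averaging or
 * weighting is required it returns a pointer straight into the reference
 * plane (updating *i_dst_stride) instead of copying, so the caller reads
 * the prediction in place. */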
static uint8_t *get_ref_altivec( uint8_t *dst,    intptr_t *i_dst_stride,
                                 uint8_t *src[4], intptr_t i_src_stride,
                                 int mvx, int mvy,
                                 int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        switch( i_width )
        {
            case 4:
                x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 8:
                x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 12:
            case 16:
            default:
                x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
            case 20:
                x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
                break;
        }
        if( weight->weightfn )
            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

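/* Chroma motion compensation: bilinear interpolation on interleaved U/V
 * (NV12-style) chroma with 1/8-pel MV precision.  The four taps are the
 * usual bilinear weights cA=(8-dx)*(8-dy), cB=dx*(8-dy), cC=(8-dx)*dy,
 * cD=dx*dy, so each output is (cA*a + cB*b + cC*c + cD*d + 32) >> 6.
 * U and V samples alternate in the source, hence the stride-2 indexing. */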
static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                           uint8_t *src, intptr_t i_src_stride,
                           int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx&0x07;
    int d8y = mvy&0x07;

    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

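/* VSLD(a,b,n): take 16 bytes starting n bytes into the concatenation a:b,
 * in big-endian byte order.  vec_sld itself is endian-sensitive, so on
 * little-endian the operands are swapped and the shift inverted to keep
 * the big-endian semantics the code is written for. */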
#ifdef WORDS_BIGENDIAN
#define VSLD(a,b,n) vec_sld(a,b,n)
#else
#define VSLD(a,b,n) vec_sld(b,a,16-n)
#endif

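/* 4-wide chroma MC.  Two output rows are produced per iteration, and the
 * bottom source row of one step is reused as the top row of the next, so
 * each source row is loaded only once.  perm0v/perm1v de-interleave the
 * packed 16-bit results back into U and V bytes. */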
static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src2v_8, dstuv, dstvv;
    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
    vec_u16_t   shiftv, k32v;

#ifdef WORDS_BIGENDIAN
    static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
    static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);
#else
    static const vec_u8_t perm0v = CV(0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12);
    static const vec_u8_t perm1v = CV(2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14);
#endif

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

    VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
    src2v_16 = vec_u8_to_u16( src2v_8 );
    src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_16 = src2v_16;
        src1v_16 = src3v_16;
        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
        src2v_16 = vec_u8_to_u16( src2v_8 );
        src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );

        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );

        dstv16 = vec_sr( dstv16, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

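/* 8-wide variant of the above: full 16-byte rows are widened into high and
 * low 16-bit halves so the same four-tap mladd chain covers all 8 U/V
 * pairs per row. */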
static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                                   uint8_t *src, intptr_t i_src_stride,
                                   int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    ALIGNED_16( uint16_t coeff[4] );
    coeff[0] = (8-d8x)*(8-d8y);
    coeff[1] = d8x    *(8-d8y);
    coeff[2] = (8-d8x)*d8y;
    coeff[3] = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
    vec_u8_t    src0v_8, src1v_8, src2v_8, src3v_8;
    vec_u8_t    dstuv, dstvv;
    vec_u16_t   src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
    vec_u16_t   src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
    vec_u16_t   shiftv, k32v;

    coeff0v = vec_ld( 0, coeff );
    coeff3v = vec_splat( coeff0v, 3 );
    coeff2v = vec_splat( coeff0v, 2 );
    coeff1v = vec_splat( coeff0v, 1 );
    coeff0v = vec_splat( coeff0v, 0 );
    k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
    shiftv  = vec_splat_u16( 6 );

#ifdef WORDS_BIGENDIAN
    static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
    static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);
#else
    static const vec_u8_t perm0v = CV(0,4,8,12,16,20,24,28,1,1,1,1,1,1,1,1);
    static const vec_u8_t perm1v = CV(2,6,10,14,18,22,26,30,1,1,1,1,1,1,1,1);
#endif

    VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
    VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
    src3v_8 = VSLD( src2v_8, src3v_8, 2 );

    for( int y = 0; y < i_height; y += 2 )
    {
        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );

        src3v_8 = VSLD( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;

        src0v_8 = src2v_8;
        src1v_8 = src3v_8;
        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );

        src3v_8 = VSLD( src2v_8, src3v_8, 2 );

        src0v_16h = vec_u8_to_u16_h( src0v_8 );
        src0v_16l = vec_u8_to_u16_l( src0v_8 );
        src1v_16h = vec_u8_to_u16_h( src1v_8 );
        src1v_16l = vec_u8_to_u16_l( src1v_8 );
        src2v_16h = vec_u8_to_u16_h( src2v_8 );
        src2v_16l = vec_u8_to_u16_l( src2v_8 );
        src3v_16h = vec_u8_to_u16_h( src3v_8 );
        src3v_16l = vec_u8_to_u16_l( src3v_8 );

        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );

        dstv_16h = vec_sr( dstv_16h, shiftv );
        dstv_16l = vec_sr( dstv_16l, shiftv );

        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );

        VEC_STORE8( dstuv, dstu );
        VEC_STORE8( dstvv, dstv );

        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}

static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                               uint8_t *src, intptr_t i_src_stride,
                               int mvx, int mvy, int i_width, int i_height )
{
    if( i_width == 8 )
        mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else if( i_width == 4 )
        mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else
        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                       mvx, mvy, i_height );
}

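/* Half-pel filter building blocks.  After the symmetric taps are pre-added
 * (t1+=t6, t2+=t5, t3+=t4), the 6-tap filter reduces to a - 5*b + 20*c.
 * HPEL_FILTER_1 computes exactly that on 16-bit lanes; HPEL_FILTER_2
 * computes (a-5*b+20*c)/16 via shifts, per the identity
 * ((a-b)/4 - b + c)/4 + c = (a-5*b+20*c)/16, keeping the wider
 * central-filter intermediates inside the 16-bit range. */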
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */          \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */          \
    t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     \
}

#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}

#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = VSLD( src1v, src6v,  1 );                        \
    src3v = VSLD( src1v, src6v,  2 );                        \
    src4v = VSLD( src1v, src6v,  3 );                        \
    src5v = VSLD( src1v, src6v,  4 );                        \
    src6v = VSLD( src1v, src6v,  5 );                        \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}

#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}

#define HPEL_FILTER_CENTRAL()                           \
{                                                       \
    temp1v = VSLD( tempav, tempbv, 12 );                \
    temp2v = VSLD( tempav, tempbv, 14 );                \
    temp3v = tempbv;                                    \
    temp4v = VSLD( tempbv, tempcv,  2 );                \
    temp5v = VSLD( tempbv, tempcv,  4 );                \
    temp6v = VSLD( tempbv, tempcv,  6 );                \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest1v = vec_add( temp1v, thirtytwov );             \
    dest1v = vec_sra( dest1v, sixv );                   \
                                                        \
    temp1v = VSLD( tempbv, tempcv, 12 );                \
    temp2v = VSLD( tempbv, tempcv, 14 );                \
    temp3v = tempcv;                                    \
    temp4v = VSLD( tempcv, tempdv,  2 );                \
    temp5v = VSLD( tempcv, tempdv,  4 );                \
    temp6v = VSLD( tempcv, tempdv,  6 );                \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest2v = vec_add( temp1v, thirtytwov );             \
    dest2v = vec_sra( dest2v, sixv );                   \
                                                        \
    destv = vec_packsu( dest1v, dest2v );               \
                                                        \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}

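/* One pass over the frame producing all three half-pel planes.  For each
 * 16-pixel strip: the horizontal filter writes dsth directly, the vertical
 * filter writes dstv and leaves its 16-bit intermediates in temp1v/temp4v,
 * and the central filter re-filters those intermediates horizontally
 * (pipelined through tempav..tempev, one strip behind) to produce dstc. */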
void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
                               intptr_t i_stride, int i_width, int i_height, int16_t *buf )
{
    vec_u8_t destv;
    vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
    vec_s16_t dest1v, dest2v;
    vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
    vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;

    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE16;
    PREP_STORE16_DST( dsth );
    LOAD_ZERO;

    vec_u16_t twov, fourv, fivev, sixv;
    vec_s16_t sixteenv, thirtytwov;
    vec_u16_u temp_u;

    temp_u.s[0] = 2;
    twov = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 4;
    fourv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 5;
    fivev = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 6;
    sixv = vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 16;
    sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
    temp_u.s[0] = 32;
    thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );

    for( int y = 0; y < i_height; y++ )
    {
        int x = 0;

        /* horizontal_filter */
        HPEL_FILTER_HORIZONTAL();

        /* vertical_filter */
        HPEL_FILTER_VERTICAL();

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = vec_splat( temp1v, 0 ); /* first only */
        tempdv = temp1v;
        tempev = temp4v;

        for( x = 16; x < i_width; x += 16 )
        {
            /* horizontal_filter */
            HPEL_FILTER_HORIZONTAL();

            /* vertical_filter */
            HPEL_FILTER_VERTICAL();

            /* central_filter */
            tempav = tempcv;
            tempbv = tempdv;
            tempcv = tempev;
            tempdv = temp1v;
            tempev = temp4v;

            HPEL_FILTER_CENTRAL();
        }

        /* Partial vertical filter */
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
        VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );

        temp1v = vec_u8_to_s16_h( src1v );
        temp2v = vec_u8_to_s16_h( src2v );
        temp3v = vec_u8_to_s16_h( src3v );
        temp4v = vec_u8_to_s16_h( src4v );
        temp5v = vec_u8_to_s16_h( src5v );
        temp6v = vec_u8_to_s16_h( src6v );

        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );

        /* central_filter */
        tempav = tempcv;
        tempbv = tempdv;
        tempcv = tempev;
        tempdv = temp1v;
        /* tempev is not used */

        HPEL_FILTER_CENTRAL();
    }
}

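/* Halve the frame in both dimensions and generate the four lowres subpel
 * planes used by the lookahead in one pass: dst0 at integer positions,
 * dsth offset by half a pixel horizontally, dstv vertically, dstc both.
 * Each output row consumes two input rows; chains of vec_avg implement
 * the 2x2 averages. */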
static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
                                            intptr_t src_stride, intptr_t dst_stride, int width, int height )
{
    int w = width >> 4;
    int end = (width & 15);
    vec_u8_t src0v, src1v, src2v;
    vec_u8_t lv, hv, src1p1v;
    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
#ifndef WORDS_BIGENDIAN
    static const vec_u8_t inverse_bridge_shuffle_1 = CV(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F );
#endif

    for( int y = 0; y < height; y++ )
    {
        int x;
        uint8_t *src1 = src0+src_stride;
        uint8_t *src2 = src1+src_stride;

        src0v = vec_ld(0, src0);
        src1v = vec_ld(0, src1);
        src2v = vec_ld(0, src2);

        avg0v = vec_avg(src0v, src1v);
        avg1v = vec_avg(src1v, src2v);

        for( x = 0; x < w; x++ )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+2), src0);
            src1p1v = vec_ld(16*(x*2+2), src1);
            avghp1v = vec_avg(lv, src1p1v);

            avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
#ifdef WORDS_BIGENDIAN
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
#else
            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dsth);
#endif

            avg0v = avghp1v;

            hv = vec_ld(16*(x*2+1), src2);
            avghv = vec_avg(src1v, hv);

            hv = vec_ld(16*(x*2+2), src2);
            avghp1v = vec_avg(src1p1v, hv);

            avgleftv = vec_avg(VSLD(avg1v, avghv, 1), avg1v);
            avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);

            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
#ifdef WORDS_BIGENDIAN
            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
#else
            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dstc);
#endif

            avg1v = avghp1v;
        }
        if( end )
        {
            lv = vec_ld(16*(x*2+1), src0);
            src1v = vec_ld(16*(x*2+1), src1);
            avghv = vec_avg(lv, src1v);

            lv = vec_ld(16*(x*2+1), src2);
            avghp1v = vec_avg(src1v, lv);

            avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
            avgrightv = vec_avg(VSLD(avg1v, avghp1v, 1), avg1v);

            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
#ifdef WORDS_BIGENDIAN
            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
#else
            hv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1);
#endif

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);

            lv = vec_sld(lv, lv, 8);
            hv = vec_sld(hv, hv, 8);

            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
        }

        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

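/* Weighted prediction: dst = clip(((src * scale + round) >> denom) + offset)
 * with round = 1 << (denom-1).  When denom is 0 the shift and rounding term
 * drop out and scale/offset fold into a single vec_mladd.  vec_packsu
 * provides the final clip to [0,255]. */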
static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
        }
    }
}
static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
        }
    }
}
static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    PREP_STORE8;
    vec_u8_t srcv;
    vec_s16_t weightv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, roundv );
            weightv = vec_sra( weightv, (vec_u16_t)denomv );
            weightv = vec_add( weightv, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
            weightv = vec_u8_to_s16( srcv );

            weightv = vec_mladd( weightv, scalev, offsetv );

            srcv = vec_packsu( weightv, zero_s16v );
            VEC_STORE8( srcv, dst );
        }
    }
}
static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD;
    PREP_LOAD_SRC( src );
    vec_u8_t srcv;
    vec_s16_t weight_lv, weight_hv;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
            weight_hv = vec_u8_to_s16_h( srcv );
            weight_lv = vec_u8_to_s16_l( srcv );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );

            srcv = vec_packsu( weight_hv, weight_lv );
            vec_st( srcv, 0, dst );
        }
    }
}
static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                   const x264_weight_t *weight, int i_height )
{
    LOAD_ZERO;
    PREP_LOAD_SRC( src );
    vec_u8_t src_1v, src_2v, src_3v;
    vec_s16_t weight_lv, weight_hv, weight_3v;
    vec_s16_t scalev, offsetv, denomv, roundv;
    vec_s16_u loadv;

    int denom = weight->i_denom;

    loadv.s[0] = weight->i_scale;
    scalev = vec_splat( loadv.v, 0 );

    loadv.s[0] = weight->i_offset;
    offsetv = vec_splat( loadv.v, 0 );

    if( denom >= 1 )
    {
        loadv.s[0] = denom;
        denomv = vec_splat( loadv.v, 0 );

        loadv.s[0] = 1<<(denom - 1);
        roundv = vec_splat( loadv.v, 0 );

        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            src_1v = vec_ld( 0,  src );
            src_2v = vec_ld( 16, src );
            src_3v = vec_ld( 19, src );
            src_1v = vec_perm( src_1v, src_2v, _src_ );
            src_3v = vec_perm( src_2v, src_3v, _src_ );
            weight_hv = vec_u8_to_s16_h( src_1v );
            weight_lv = vec_u8_to_s16_l( src_1v );
            weight_3v = vec_u8_to_s16_h( src_3v );

            weight_hv = vec_mladd( weight_hv, scalev, roundv );
            weight_lv = vec_mladd( weight_lv, scalev, roundv );
            weight_3v = vec_mladd( weight_3v, scalev, roundv );
            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
            weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
            weight_hv = vec_add( weight_hv, offsetv );
            weight_lv = vec_add( weight_lv, offsetv );
            weight_3v = vec_add( weight_3v, offsetv );

            src_1v = vec_packsu( weight_hv, weight_lv );
            src_3v = vec_packsu( weight_3v, zero_s16v );
            vec_st( src_1v, 0, dst );
            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
        }
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
        {
            src_1v = vec_ld( 0,  src );
            src_2v = vec_ld( 16, src );
            src_3v = vec_ld( 19, src );
            src_1v = vec_perm( src_1v, src_2v, _src_ );
            src_3v = vec_perm( src_2v, src_3v, _src_ );
            weight_hv = vec_u8_to_s16_h( src_1v );
            weight_lv = vec_u8_to_s16_l( src_1v );
            weight_3v = vec_u8_to_s16_h( src_3v );

            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
            weight_3v = vec_mladd( weight_3v, scalev, offsetv );

            src_1v = vec_packsu( weight_hv, weight_lv );
            src_3v = vec_packsu( weight_3v, zero_s16v );
            vec_st( src_1v, 0, dst );
            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
        }
    }
}

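/* Weight-function table indexed by block width >> 2, matching the
 * weight->weightfn[i_width>>2] call sites above; widths 12 and 16 share
 * the 16-wide kernel. */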
static weight_fn_t x264_mc_weight_wtab_altivec[6] =
{
    mc_weight_w2_altivec,
    mc_weight_w4_altivec,
    mc_weight_w8_altivec,
    mc_weight_w16_altivec,
    mc_weight_w16_altivec,
    mc_weight_w20_altivec,
};

#endif // !HIGH_BIT_DEPTH

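/* Entry point: plug the AltiVec implementations into the motion
 * compensation function table (8-bit depth only). */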
void x264_mc_altivec_init( x264_mc_functions_t *pf )
{
#if !HIGH_BIT_DEPTH
    pf->mc_luma   = mc_luma_altivec;
    pf->get_ref   = get_ref_altivec;
    pf->mc_chroma = mc_chroma_altivec;

    pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;

    pf->hpel_filter = x264_hpel_filter_altivec;
    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;

    pf->weight = x264_mc_weight_wtab_altivec;
#endif // !HIGH_BIT_DEPTH
}