1 /*****************************************************************************
2 * mc.c: ppc motion compensation
3 *****************************************************************************
4 * Copyright (C) 2003-2016 x264 project
6 * Authors: Eric Petit <eric.petit@lapsus.org>
7 * Guillaume Poirier <gpoirier@mplayerhq.hu>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 * This program is also available under a commercial proprietary license.
24 * For more information, contact us at licensing@x264.com.
25 *****************************************************************************/
34 #include "common/common.h"
35 #include "common/mc.h"
37 #include "ppccommon.h"
/* Function-pointer type for a fixed-width motion-compensation copy/filter:
 * processes i_height rows from src (stride i_src) into dst (stride i_dst). */
40 typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
41 uint8_t *dst, intptr_t i_dst, int i_height );
/* H.264 6-tap half-pel filter (coefficients 1,-5,20,20,-5,1) applied along
 * the axis selected by i_pix_next (the distance between adjacent taps:
 * 1 for horizontal, the frame stride for vertical).
 * NOTE(review): this excerpt is truncated — the final "+ pix[3*i_pix_next]"
 * tap and the braces are missing from the visible lines; confirm against the
 * full file. */
43 static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
45 return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
46 pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
/* Same 6-tap filter specialized to a horizontal tap distance of 1.
 * NOTE(review): the trailing "+ pix[3]" tap and braces are not visible in
 * this excerpt — truncation of the extraction, not of the real file. */
50 static inline int x264_tapfilter1( uint8_t *pix )
52 return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
/* Rounded average of two 4-pixel-wide sources into dst, one row at a time.
 * Width 4 is too narrow to benefit from vectorization, so this is scalar C
 * despite the _altivec suffix.  dst[x] = (src1[x]+src2[x]+1)>>1 (round up). */
56 static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst, intptr_t i_dst,
57 uint8_t *src1, intptr_t i_src1,
58 uint8_t *src2, int i_height )
60 for( int y = 0; y < i_height; y++ )
62 for( int x = 0; x < 4; x++ )
63 dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
/* Rounded average of two 8-wide sources using AltiVec: vec_avg computes the
 * same (a+b+1)>>1 per byte as the scalar w4 version.  Loads 8 bytes from
 * each source via the project's unaligned-load macros and stores the low
 * 8 bytes of the result.
 * NOTE(review): src2's stride parameter is not visible in this excerpt;
 * the pointer-advance statements at loop end are also elided. */
70 static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst, intptr_t i_dst,
71 uint8_t *src1, intptr_t i_src1,
72 uint8_t *src2, int i_height )
74 vec_u8_t src1v, src2v;
/* Precompute permute vectors for unaligned loads of each source. */
77 PREP_LOAD_SRC( src1 );
78 PREP_LOAD_SRC( src2 );
80 for( int y = 0; y < i_height; y++ )
82 VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
83 VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
84 src1v = vec_avg( src1v, src2v );
85 VEC_STORE8( src1v, dst );
/* Rounded average of two 16-wide sources: one full vector per row.
 * The store uses vec_st directly, which requires dst to be 16-byte aligned
 * (vec_st silently truncates the address to alignment). */
93 static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst, intptr_t i_dst,
94 uint8_t *src1, intptr_t i_src1,
95 uint8_t *src2, int i_height )
97 vec_u8_t src1v, src2v;
99 PREP_LOAD_SRC( src1 );
100 PREP_LOAD_SRC( src2 );
102 for( int y = 0; y < i_height; y++ )
104 VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
105 VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
106 src1v = vec_avg( src1v, src2v );
107 vec_st(src1v, 0, dst);
/* Width-20 average: composed as a 16-wide vector pass plus a scalar 4-wide
 * pass on the remaining columns 16..19. */
115 static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst, intptr_t i_dst,
116 uint8_t *src1, intptr_t i_src1,
117 uint8_t *src2, int i_height )
119 x264_pixel_avg2_w16_altivec(dst, i_dst, src1, i_src1, src2, i_height);
120 x264_pixel_avg2_w4_altivec(dst+16, i_dst, src1+16, i_src1, src2+16, i_height);
123 /* mc_copy: plain c */
/* Generates a straight row-copy function of fixed width `a` (4 or 8 bytes
 * per row via memcpy); widths this small don't pay for vector loads. */
125 #define MC_COPY( name, a ) \
126 static void name( uint8_t *dst, intptr_t i_dst, \
127 uint8_t *src, intptr_t i_src, int i_height ) \
130 for( y = 0; y < i_height; y++ ) \
132 memcpy( dst, src, a ); \
137 MC_COPY( x264_mc_copy_w4_altivec, 4 )
138 MC_COPY( x264_mc_copy_w8_altivec, 8 )
/* 16-wide copy with a possibly unaligned source (VEC_LOAD handles the
 * misalignment) and an aligned destination (plain vec_st). */
140 static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
141 uint8_t *src, intptr_t i_src, int i_height )
145 PREP_LOAD_SRC( src );
147 for( int y = 0; y < i_height; y++ )
149 VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
150 vec_st(cpyV, 0, dst);
/* 16-wide copy where BOTH src and dst are 16-byte aligned: a bare
 * vec_ld/vec_st pair per row, no permute needed. */
158 static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
159 uint8_t *src, intptr_t i_src, int i_height )
161 for( int y = 0; y < i_height; ++y )
163 vec_u8_t cpyV = vec_ld( 0, src );
164 vec_st(cpyV, 0, dst);
/* Quarter-pel luma motion compensation into a caller-provided buffer.
 * src[] holds the four half-pel planes; qpel_idx encodes the fractional
 * part of (mvx,mvy).  If interpolation is needed (qpel_idx & 5), the result
 * is the rounded average of two half-pel planes, optionally weighted
 * in-place; otherwise it's a plain copy (weighted directly from src1).
 * NOTE(review): the mvx/mvy parameter line and the switch/brace lines are
 * elided in this excerpt; the visible calls are the w4/w8/w16 dispatch. */
172 static void mc_luma_altivec( uint8_t *dst, intptr_t i_dst_stride,
173 uint8_t *src[4], intptr_t i_src_stride,
175 int i_width, int i_height, const x264_weight_t *weight )
177 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
178 intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
/* ref0 plane, shifted one row down when mvy has the 3/4-pel phase. */
179 uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
180 if( qpel_idx & 5 ) /* qpel interpolation needed */
182 uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
187 x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
190 x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
194 x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
/* Weighted prediction: re-weight the averaged block in place. */
196 if( weight->weightfn )
197 weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
/* No interpolation: weight straight from the reference, or just copy. */
199 else if( weight->weightfn )
200 weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
206 x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
209 x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
212 x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
/* Like mc_luma_altivec, but may return a pointer directly into the
 * reference plane instead of copying: when no qpel interpolation and no
 * weighting is needed, *i_dst_stride is set to the source stride and src1
 * is returned (zero-copy path).  Otherwise the block is synthesized into
 * dst.  Also supports width 20 (16+4) for lowres lookahead.
 * NOTE(review): the mvx/mvy parameter line, switch scaffolding and the
 * return statements are elided in this excerpt. */
220 static uint8_t *get_ref_altivec( uint8_t *dst, intptr_t *i_dst_stride,
221 uint8_t *src[4], intptr_t i_src_stride,
223 int i_width, int i_height, const x264_weight_t *weight )
225 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
226 intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
227 uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
228 if( qpel_idx & 5 ) /* qpel interpolation needed */
230 uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
234 x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
237 x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
242 x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
245 x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
248 if( weight->weightfn )
249 weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
252 else if( weight->weightfn )
254 weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
/* Zero-copy path: expose the reference plane's stride to the caller. */
259 *i_dst_stride = i_src_stride;
/* Scalar 2-wide chroma MC on interleaved NV12-style data (U and V bytes
 * alternate in src).  Standard bilinear 1/8-pel interpolation: cA..cD are
 * the four corner weights (summing to 64), result rounded by +32 and >>6.
 * NOTE(review): the declarations of d8x/d8y/cD/srcp and the "src +="
 * advance at loop end are elided in this excerpt. */
264 static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
265 uint8_t *src, intptr_t i_src_stride,
266 int mvx, int mvy, int i_height )
272 int cA = (8-d8x)*(8-d8y);
273 int cB = d8x *(8-d8y);
274 int cC = (8-d8x)*d8y;
/* Integer part of the MV; *2 on x because U/V are interleaved. */
277 src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
278 srcp = &src[i_src_stride];
280 for( int y = 0; y < i_height; y++ )
282 dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
283 dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
284 dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
285 dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;
288 srcp += i_src_stride;
289 dstu += i_dst_stride;
290 dstv += i_dst_stride;
/* Endian-neutral "shift left double": concatenate a:b and take 16 bytes
 * starting n bytes in.  On little-endian, vec_sld's byte numbering is
 * reversed, so operands are swapped and the shift mirrored (16-n). */
294 #ifdef WORDS_BIGENDIAN
295 #define VSLD(a,b,n) vec_sld(a,b,n)
297 #define VSLD(a,b,n) vec_sld(b,a,16-n)
/* AltiVec 4-wide chroma MC on interleaved U/V data.  The four bilinear
 * corner weights are built in coeff[] and splatted across u16 lanes; each
 * output row is sum(coeff_i * src_i) + 32, >>6 (k32v/shiftv).  Rows are
 * processed two at a time by reusing the previous row's loads (src2/3
 * become next iteration's src0/1); the trailing odd row is handled by the
 * duplicated tail after the loop.  perm0v/perm1v de-interleave U and V
 * bytes from the packed u16 results (offsets differ by endianness).
 * NOTE(review): several scaffolding lines (coeff[3], srcp declaration,
 * the src0/1 <- src2/3 rotation, braces, #else/#endif) are elided in this
 * excerpt. */
300 static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
301 uint8_t *src, intptr_t i_src_stride,
302 int mvx, int mvy, int i_height )
305 int d8x = mvx & 0x07;
306 int d8y = mvy & 0x07;
308 ALIGNED_16( uint16_t coeff[4] );
309 coeff[0] = (8-d8x)*(8-d8y);
310 coeff[1] = d8x *(8-d8y);
311 coeff[2] = (8-d8x)*d8y;
314 src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
315 srcp = &src[i_src_stride];
319 PREP_LOAD_SRC( src );
320 vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
321 vec_u8_t src2v_8, dstuv, dstvv;
322 vec_u16_t src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
323 vec_u16_t shiftv, k32v;
325 #ifdef WORDS_BIGENDIAN
326 static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
327 static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);
329 static const vec_u8_t perm0v = CV(0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12);
330 static const vec_u8_t perm1v = CV(2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14);
/* Splat the four scalar weights into one vector register each. */
333 coeff0v = vec_ld( 0, coeff );
334 coeff3v = vec_splat( coeff0v, 3 );
335 coeff2v = vec_splat( coeff0v, 2 );
336 coeff1v = vec_splat( coeff0v, 1 );
337 coeff0v = vec_splat( coeff0v, 0 );
/* Rounding constant 32 (= 1<<5) and final shift of 6. */
338 k32v = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
339 shiftv = vec_splat_u16( 6 );
/* Prime the pipeline: load row 0 (9 bytes covers 4 UV pairs + 1). */
341 VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
342 src2v_16 = vec_u8_to_u16( src2v_8 );
/* Shift by 2 bytes = one UV pair to the right -> horizontal neighbor. */
343 src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
345 for( int y = 0; y < i_height; y += 2 )
349 VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
350 src2v_16 = vec_u8_to_u16( src2v_8 );
351 src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
/* dst = (c0*s0 + c1*s1 + c2*s2 + c3*s3 + 32) >> 6, per u16 lane. */
353 dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
354 dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
355 dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
356 dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
358 dstv16 = vec_sr( dstv16, shiftv );
/* Split interleaved result into 4 U bytes and 4 V bytes, store 32 bits. */
360 dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
361 dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
362 vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
363 vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
365 srcp += i_src_stride;
366 dstu += i_dst_stride;
367 dstv += i_dst_stride;
/* Second row of the pair (same computation, next source row). */
371 VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
372 src2v_16 = vec_u8_to_u16( src2v_8 );
373 src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
375 dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
376 dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
377 dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
378 dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
380 dstv16 = vec_sr( dstv16, shiftv );
382 dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
383 dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
384 vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
385 vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
387 srcp += i_src_stride;
388 dstu += i_dst_stride;
389 dstv += i_dst_stride;
/* AltiVec 8-wide chroma MC, same bilinear scheme as the 4xh version but a
 * full 16-byte row (8 UV pairs) per iteration: the u8 row is widened to
 * two u16 halves (_h/_l), accumulated separately, then re-packed.  Each
 * source row needs 18 bytes (16 + 2-byte horizontal neighbor), loaded as
 * 16+2 and joined with VSLD.  perm0v/perm1v gather the 8 U and 8 V bytes
 * from the two u16 halves; VEC_STORE8 writes 8 bytes per plane.
 * NOTE(review): the srcp/coeff[3] setup lines, the src0/1 <- src2/3
 * rotation and brace/#else/#endif lines are elided in this excerpt. */
393 static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
394 uint8_t *src, intptr_t i_src_stride,
395 int mvx, int mvy, int i_height )
398 int d8x = mvx & 0x07;
399 int d8y = mvy & 0x07;
401 ALIGNED_16( uint16_t coeff[4] );
402 coeff[0] = (8-d8x)*(8-d8y);
403 coeff[1] = d8x *(8-d8y);
404 coeff[2] = (8-d8x)*d8y;
407 src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
408 srcp = &src[i_src_stride];
412 PREP_LOAD_SRC( src );
414 vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
415 vec_u8_t src0v_8, src1v_8, src2v_8, src3v_8;
416 vec_u8_t dstuv, dstvv;
417 vec_u16_t src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
418 vec_u16_t src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
419 vec_u16_t shiftv, k32v;
421 coeff0v = vec_ld( 0, coeff );
422 coeff3v = vec_splat( coeff0v, 3 );
423 coeff2v = vec_splat( coeff0v, 2 );
424 coeff1v = vec_splat( coeff0v, 1 );
425 coeff0v = vec_splat( coeff0v, 0 );
426 k32v = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
427 shiftv = vec_splat_u16( 6 );
429 #ifdef WORDS_BIGENDIAN
430 static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
431 static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);
433 static const vec_u8_t perm0v = CV(0,4,8,12,16,20,24,28,1,1,1,1,1,1,1,1);
434 static const vec_u8_t perm1v = CV(2,6,10,14,18,22,26,30,1,1,1,1,1,1,1,1);
/* Prime the pipeline: row 0 = 16 bytes + 2 overflow bytes, joined. */
437 VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
438 VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
439 src3v_8 = VSLD( src2v_8, src3v_8, 2 );
441 for( int y = 0; y < i_height; y += 2 )
445 VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
446 VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
448 src3v_8 = VSLD( src2v_8, src3v_8, 2 );
/* Widen all four u8 rows to high/low u16 halves. */
450 src0v_16h = vec_u8_to_u16_h( src0v_8 );
451 src0v_16l = vec_u8_to_u16_l( src0v_8 );
452 src1v_16h = vec_u8_to_u16_h( src1v_8 );
453 src1v_16l = vec_u8_to_u16_l( src1v_8 );
454 src2v_16h = vec_u8_to_u16_h( src2v_8 );
455 src2v_16l = vec_u8_to_u16_l( src2v_8 );
456 src3v_16h = vec_u8_to_u16_h( src3v_8 );
457 src3v_16l = vec_u8_to_u16_l( src3v_8 );
/* Accumulate c0..c3 weighted sums + 32, then >>6, per half. */
459 dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
460 dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
461 dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
462 dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
463 dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
464 dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
465 dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
466 dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
468 dstv_16h = vec_sr( dstv_16h, shiftv );
469 dstv_16l = vec_sr( dstv_16l, shiftv );
/* De-interleave into 8 U bytes and 8 V bytes and store. */
471 dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
472 dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
474 VEC_STORE8( dstuv, dstu );
475 VEC_STORE8( dstvv, dstv );
477 srcp += i_src_stride;
478 dstu += i_dst_stride;
479 dstv += i_dst_stride;
/* Second row of the pair. */
483 VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
484 VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
486 src3v_8 = VSLD( src2v_8, src3v_8, 2 );
488 src0v_16h = vec_u8_to_u16_h( src0v_8 );
489 src0v_16l = vec_u8_to_u16_l( src0v_8 );
490 src1v_16h = vec_u8_to_u16_h( src1v_8 );
491 src1v_16l = vec_u8_to_u16_l( src1v_8 );
492 src2v_16h = vec_u8_to_u16_h( src2v_8 );
493 src2v_16l = vec_u8_to_u16_l( src2v_8 );
494 src3v_16h = vec_u8_to_u16_h( src3v_8 );
495 src3v_16l = vec_u8_to_u16_l( src3v_8 );
497 dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
498 dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
499 dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
500 dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
501 dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
502 dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
503 dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
504 dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
506 dstv_16h = vec_sr( dstv_16h, shiftv );
507 dstv_16l = vec_sr( dstv_16l, shiftv );
509 dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
510 dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
512 VEC_STORE8( dstuv, dstu );
513 VEC_STORE8( dstvv, dstv );
515 srcp += i_src_stride;
516 dstu += i_dst_stride;
517 dstv += i_dst_stride;
/* Chroma MC dispatcher: pick the width-specialized kernel (8, 4, or the
 * scalar 2-wide fallback).
 * NOTE(review): the leading "if( i_width == 8 )" line is elided in this
 * excerpt, as the else-if chain implies. */
521 static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
522 uint8_t *src, intptr_t i_src_stride,
523 int mvx, int mvy, int i_width, int i_height )
526 mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
527 mvx, mvy, i_height );
528 else if( i_width == 4 )
529 mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
530 mvx, mvy, i_height );
532 mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
533 mvx, mvy, i_height );
/* First-stage 6-tap hpel filter on s16 lanes: with a=t1+t6, b=t2+t5,
 * c=t3+t4 (the comments below track the running value), computes
 * t1v = a - 5*b + 20*c.  Caller adds the rounding constant and shifts. */
536 #define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
538 t1v = vec_add( t1v, t6v ); \
539 t2v = vec_add( t2v, t5v ); \
540 t3v = vec_add( t3v, t4v ); \
542 t1v = vec_sub( t1v, t2v ); /* (a-b) */ \
543 t2v = vec_sub( t2v, t3v ); /* (b-c) */ \
544 t2v = vec_sl( t2v, twov ); /* (b-c)*4 */ \
545 t1v = vec_sub( t1v, t2v ); /* a-5*b+4*c */ \
546 t3v = vec_sl( t3v, fourv ); /* 16*c */ \
547 t1v = vec_add( t1v, t3v ); /* a-5*b+20*c */ \
/* Second-stage 6-tap filter used by the central (diagonal) hpel pass:
 * same taps as HPEL_FILTER_1 but evaluated as (a-5*b+20*c)/16 using two
 * arithmetic shifts, keeping the intermediate within s16 range. */
550 #define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
552 t1v = vec_add( t1v, t6v ); \
553 t2v = vec_add( t2v, t5v ); \
554 t3v = vec_add( t3v, t4v ); \
556 t1v = vec_sub( t1v, t2v ); /* (a-b) */ \
557 t1v = vec_sra( t1v, twov ); /* (a-b)/4 */ \
558 t1v = vec_sub( t1v, t2v ); /* (a-b)/4-b */ \
559 t1v = vec_add( t1v, t3v ); /* (a-b)/4-b+c */ \
560 t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
561 t1v = vec_add( t1v, t3v ); /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
/* One 16-pixel horizontal hpel step: load pixels x-2..x+29, build the six
 * shifted tap vectors with VSLD, filter the high and low 8-lane halves
 * with HPEL_FILTER_1, round (+16, >>5), saturate-pack to u8 and store to
 * dsth.  Uses x, y, i_stride, src, dsth and the splatted shift constants
 * from the enclosing function. */
564 #define HPEL_FILTER_HORIZONTAL() \
566 VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
567 VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
569 src2v = VSLD( src1v, src6v, 1 ); \
570 src3v = VSLD( src1v, src6v, 2 ); \
571 src4v = VSLD( src1v, src6v, 3 ); \
572 src5v = VSLD( src1v, src6v, 4 ); \
573 src6v = VSLD( src1v, src6v, 5 ); \
575 temp1v = vec_u8_to_s16_h( src1v ); \
576 temp2v = vec_u8_to_s16_h( src2v ); \
577 temp3v = vec_u8_to_s16_h( src3v ); \
578 temp4v = vec_u8_to_s16_h( src4v ); \
579 temp5v = vec_u8_to_s16_h( src5v ); \
580 temp6v = vec_u8_to_s16_h( src6v ); \
582 HPEL_FILTER_1( temp1v, temp2v, temp3v, \
583 temp4v, temp5v, temp6v ); \
585 dest1v = vec_add( temp1v, sixteenv ); \
586 dest1v = vec_sra( dest1v, fivev ); \
588 temp1v = vec_u8_to_s16_l( src1v ); \
589 temp2v = vec_u8_to_s16_l( src2v ); \
590 temp3v = vec_u8_to_s16_l( src3v ); \
591 temp4v = vec_u8_to_s16_l( src4v ); \
592 temp5v = vec_u8_to_s16_l( src5v ); \
593 temp6v = vec_u8_to_s16_l( src6v ); \
595 HPEL_FILTER_1( temp1v, temp2v, temp3v, \
596 temp4v, temp5v, temp6v ); \
598 dest2v = vec_add( temp1v, sixteenv ); \
599 dest2v = vec_sra( dest2v, fivev ); \
601 destv = vec_packsu( dest1v, dest2v ); \
603 VEC_STORE16( destv, &dsth[x+i_stride*y], dsth ); \
/* One 16-pixel vertical hpel step: load the six rows y-2..y+3 at column x,
 * filter high/low halves with HPEL_FILTER_1, round (+16, >>5), pack and
 * store to dstv.  The low half keeps its filtered sums in temp4v..temp9v
 * because the central pass reuses them afterwards. */
606 #define HPEL_FILTER_VERTICAL() \
608 VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
609 VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
610 VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
611 VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
612 VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
613 VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
615 temp1v = vec_u8_to_s16_h( src1v ); \
616 temp2v = vec_u8_to_s16_h( src2v ); \
617 temp3v = vec_u8_to_s16_h( src3v ); \
618 temp4v = vec_u8_to_s16_h( src4v ); \
619 temp5v = vec_u8_to_s16_h( src5v ); \
620 temp6v = vec_u8_to_s16_h( src6v ); \
622 HPEL_FILTER_1( temp1v, temp2v, temp3v, \
623 temp4v, temp5v, temp6v ); \
625 dest1v = vec_add( temp1v, sixteenv ); \
626 dest1v = vec_sra( dest1v, fivev ); \
628 temp4v = vec_u8_to_s16_l( src1v ); \
629 temp5v = vec_u8_to_s16_l( src2v ); \
630 temp6v = vec_u8_to_s16_l( src3v ); \
631 temp7v = vec_u8_to_s16_l( src4v ); \
632 temp8v = vec_u8_to_s16_l( src5v ); \
633 temp9v = vec_u8_to_s16_l( src6v ); \
635 HPEL_FILTER_1( temp4v, temp5v, temp6v, \
636 temp7v, temp8v, temp9v ); \
638 dest2v = vec_add( temp4v, sixteenv ); \
639 dest2v = vec_sra( dest2v, fivev ); \
641 destv = vec_packsu( dest1v, dest2v ); \
643 VEC_STORE16( destv, &dstv[x+i_stride*y], dsth ); \
/* Central (diagonal) hpel step: horizontally filter the intermediate
 * vertically-filtered s16 rows held in tempav..tempdv (a sliding window of
 * previous results), using VSLD to form the six taps across vector
 * boundaries, then HPEL_FILTER_2, round (+32, >>6), pack and store 16
 * bytes to dstc at x-16 (one vector behind, since the window lags).
 * NOTE(review): the assignments of temp3v (the center tap) are elided in
 * this excerpt. */
646 #define HPEL_FILTER_CENTRAL() \
648 temp1v = VSLD( tempav, tempbv, 12 ); \
649 temp2v = VSLD( tempav, tempbv, 14 ); \
651 temp4v = VSLD( tempbv, tempcv, 2 ); \
652 temp5v = VSLD( tempbv, tempcv, 4 ); \
653 temp6v = VSLD( tempbv, tempcv, 6 ); \
655 HPEL_FILTER_2( temp1v, temp2v, temp3v, \
656 temp4v, temp5v, temp6v ); \
658 dest1v = vec_add( temp1v, thirtytwov ); \
659 dest1v = vec_sra( dest1v, sixv ); \
661 temp1v = VSLD( tempbv, tempcv, 12 ); \
662 temp2v = VSLD( tempbv, tempcv, 14 ); \
664 temp4v = VSLD( tempcv, tempdv, 2 ); \
665 temp5v = VSLD( tempcv, tempdv, 4 ); \
666 temp6v = VSLD( tempcv, tempdv, 6 ); \
668 HPEL_FILTER_2( temp1v, temp2v, temp3v, \
669 temp4v, temp5v, temp6v ); \
671 dest2v = vec_add( temp1v, thirtytwov ); \
672 dest2v = vec_sra( dest2v, sixv ); \
674 destv = vec_packsu( dest1v, dest2v ); \
676 VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
/* Computes the three half-pel planes (dsth = horizontal, dstv = vertical,
 * dstc = central/diagonal) for one frame, 16 pixels per iteration.  The
 * splatted constants twov/fourv/fivev/sixv/sixteenv/thirtytwov feed the
 * HPEL_FILTER_* macros; tempav..tempev form the sliding window of
 * vertically-filtered rows consumed by the central pass.
 * NOTE(review): this excerpt elides the temp_u union declaration, the
 * per-iteration window-rotation assignments, the edge handling and several
 * loop/brace lines — only the macro invocations and setup are visible. */
679 void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
680 intptr_t i_stride, int i_width, int i_height, int16_t *buf )
683 vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
684 vec_s16_t dest1v, dest2v;
685 vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
686 vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;
691 PREP_STORE16_DST( dsth );
694 vec_u16_t twov, fourv, fivev, sixv;
695 vec_s16_t sixteenv, thirtytwov;
/* Splat the shift amounts and rounding constants used by the macros. */
699 twov = vec_splat( temp_u.v, 0 );
701 fourv = vec_splat( temp_u.v, 0 );
703 fivev = vec_splat( temp_u.v, 0 );
705 sixv = vec_splat( temp_u.v, 0 );
707 sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
709 thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );
711 for( int y = 0; y < i_height; y++ )
715 /* horizontal_filter */
716 HPEL_FILTER_HORIZONTAL();
718 /* vertical_filter */
719 HPEL_FILTER_VERTICAL();
724 tempcv = vec_splat( temp1v, 0 ); /* first only */
728 for( x = 16; x < i_width; x+=16 )
730 /* horizontal_filter */
731 HPEL_FILTER_HORIZONTAL();
733 /* vertical_filter */
734 HPEL_FILTER_VERTICAL();
743 HPEL_FILTER_CENTRAL();
746 /* Partial vertical filter */
747 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
748 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
749 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
750 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
751 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
752 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );
754 temp1v = vec_u8_to_s16_h( src1v );
755 temp2v = vec_u8_to_s16_h( src2v );
756 temp3v = vec_u8_to_s16_h( src3v );
757 temp4v = vec_u8_to_s16_h( src4v );
758 temp5v = vec_u8_to_s16_h( src5v );
759 temp6v = vec_u8_to_s16_h( src6v );
761 HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );
768 /* tempev is not used */
770 HPEL_FILTER_CENTRAL();
774 static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
775 intptr_t src_stride, intptr_t dst_stride, int width, int height )
778 int end = (width & 15);
779 vec_u8_t src0v, src1v, src2v;
780 vec_u8_t lv, hv, src1p1v;
781 vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
782 static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
783 #ifndef WORDS_BIGENDIAN
784 static const vec_u8_t inverse_bridge_shuffle_1 = CV(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F );
787 for( int y = 0; y < height; y++ )
790 uint8_t *src1 = src0+src_stride;
791 uint8_t *src2 = src1+src_stride;
793 src0v = vec_ld(0, src0);
794 src1v = vec_ld(0, src1);
795 src2v = vec_ld(0, src2);
797 avg0v = vec_avg(src0v, src1v);
798 avg1v = vec_avg(src1v, src2v);
800 for( x = 0; x < w; x++ )
802 lv = vec_ld(16*(x*2+1), src0);
803 src1v = vec_ld(16*(x*2+1), src1);
804 avghv = vec_avg(lv, src1v);
806 lv = vec_ld(16*(x*2+2), src0);
807 src1p1v = vec_ld(16*(x*2+2), src1);
808 avghp1v = vec_avg(lv, src1p1v);
810 avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
811 avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
813 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
814 #ifdef WORDS_BIGENDIAN
815 vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
817 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dsth);
822 hv = vec_ld(16*(x*2+1), src2);
823 avghv = vec_avg(src1v, hv);
825 hv = vec_ld(16*(x*2+2), src2);
826 avghp1v = vec_avg(src1p1v, hv);
828 avgleftv = vec_avg(VSLD(avg1v, avghv, 1), avg1v);
829 avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
831 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
832 #ifdef WORDS_BIGENDIAN
833 vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
835 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dstc);
843 lv = vec_ld(16*(x*2+1), src0);
844 src1v = vec_ld(16*(x*2+1), src1);
845 avghv = vec_avg(lv, src1v);
847 lv = vec_ld(16*(x*2+1), src2);
848 avghp1v = vec_avg(src1v, lv);
850 avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
851 avgrightv = vec_avg(VSLD(avg1v, avghp1v, 1), avg1v);
853 lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
854 #ifdef WORDS_BIGENDIAN
855 hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
857 hv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1);
860 vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
861 vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
862 vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
863 vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);
865 lv = vec_sld(lv, lv, 8);
866 hv = vec_sld(hv, hv, 8);
868 vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
869 vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
870 vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
871 vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
874 src0 += src_stride*2;
/* Explicit weighted prediction, width 2: dst = clip((src*scale + round)
 * >> denom) + offset when denom > 0, else dst = clip(src*scale + offset).
 * Scale/offset/denom come from the x264_weight_t; each is splatted across
 * s16 lanes.  Only the low 2 bytes of the packed result are stored via a
 * 16-bit vec_ste.
 * NOTE(review): the loadv union declaration, the "if( denom )" split
 * between the two loops, and the denom splat load are elided here. */
882 static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
883 const x264_weight_t *weight, int i_height )
887 PREP_LOAD_SRC( src );
890 vec_s16_t scalev, offsetv, denomv, roundv;
893 int denom = weight->i_denom;
895 loadv.s[0] = weight->i_scale;
896 scalev = vec_splat( loadv.v, 0 );
898 loadv.s[0] = weight->i_offset;
899 offsetv = vec_splat( loadv.v, 0 );
904 denomv = vec_splat( loadv.v, 0 );
/* Rounding term for the >>denom shift. */
906 loadv.s[0] = 1<<(denom - 1);
907 roundv = vec_splat( loadv.v, 0 );
909 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
911 VEC_LOAD( src, srcv, 2, vec_u8_t, src );
912 weightv = vec_u8_to_s16( srcv );
914 weightv = vec_mladd( weightv, scalev, roundv );
915 weightv = vec_sra( weightv, (vec_u16_t)denomv );
916 weightv = vec_add( weightv, offsetv );
918 srcv = vec_packsu( weightv, zero_s16v );
919 vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
/* denom == 0 path: no shift, offset folded into the multiply-add. */
924 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
926 VEC_LOAD( src, srcv, 2, vec_u8_t, src );
927 weightv = vec_u8_to_s16( srcv );
929 weightv = vec_mladd( weightv, scalev, offsetv );
931 srcv = vec_packsu( weightv, zero_s16v );
932 vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
/* Weighted prediction, width 4: identical scheme to mc_weight_w2_altivec
 * but loads 4 bytes per row and stores the low 32 bits via vec_ste. */
936 static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
937 const x264_weight_t *weight, int i_height )
941 PREP_LOAD_SRC( src );
944 vec_s16_t scalev, offsetv, denomv, roundv;
947 int denom = weight->i_denom;
949 loadv.s[0] = weight->i_scale;
950 scalev = vec_splat( loadv.v, 0 );
952 loadv.s[0] = weight->i_offset;
953 offsetv = vec_splat( loadv.v, 0 );
958 denomv = vec_splat( loadv.v, 0 );
960 loadv.s[0] = 1<<(denom - 1);
961 roundv = vec_splat( loadv.v, 0 );
963 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
965 VEC_LOAD( src, srcv, 4, vec_u8_t, src );
966 weightv = vec_u8_to_s16( srcv );
968 weightv = vec_mladd( weightv, scalev, roundv );
969 weightv = vec_sra( weightv, (vec_u16_t)denomv );
970 weightv = vec_add( weightv, offsetv );
972 srcv = vec_packsu( weightv, zero_s16v );
973 vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
/* denom == 0 path. */
978 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
980 VEC_LOAD( src, srcv, 4, vec_u8_t, src );
981 weightv = vec_u8_to_s16( srcv );
983 weightv = vec_mladd( weightv, scalev, offsetv );
985 srcv = vec_packsu( weightv, zero_s16v );
986 vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
/* Weighted prediction, width 8: same scheme, 8 bytes per row stored with
 * VEC_STORE8. */
990 static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
991 const x264_weight_t *weight, int i_height )
995 PREP_LOAD_SRC( src );
999 vec_s16_t scalev, offsetv, denomv, roundv;
1002 int denom = weight->i_denom;
1004 loadv.s[0] = weight->i_scale;
1005 scalev = vec_splat( loadv.v, 0 );
1007 loadv.s[0] = weight->i_offset;
1008 offsetv = vec_splat( loadv.v, 0 );
1013 denomv = vec_splat( loadv.v, 0 );
1015 loadv.s[0] = 1<<(denom - 1);
1016 roundv = vec_splat( loadv.v, 0 );
1018 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1020 VEC_LOAD( src, srcv, 8, vec_u8_t, src );
1021 weightv = vec_u8_to_s16( srcv );
1023 weightv = vec_mladd( weightv, scalev, roundv );
1024 weightv = vec_sra( weightv, (vec_u16_t)denomv );
1025 weightv = vec_add( weightv, offsetv );
1027 srcv = vec_packsu( weightv, zero_s16v );
1028 VEC_STORE8( srcv, dst );
/* denom == 0 path. */
1033 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1035 VEC_LOAD( src, srcv, 8, vec_u8_t, src );
1036 weightv = vec_u8_to_s16( srcv );
1038 weightv = vec_mladd( weightv, scalev, offsetv );
1040 srcv = vec_packsu( weightv, zero_s16v );
1041 VEC_STORE8( srcv, dst );
/* Weighted prediction, width 16: the 16-byte row is widened to high/low
 * s16 halves, each weighted independently, then saturate-packed and
 * stored as one aligned vector. */
1045 static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
1046 const x264_weight_t *weight, int i_height )
1050 PREP_LOAD_SRC( src );
1052 vec_s16_t weight_lv, weight_hv;
1053 vec_s16_t scalev, offsetv, denomv, roundv;
1056 int denom = weight->i_denom;
1058 loadv.s[0] = weight->i_scale;
1059 scalev = vec_splat( loadv.v, 0 );
1061 loadv.s[0] = weight->i_offset;
1062 offsetv = vec_splat( loadv.v, 0 );
1067 denomv = vec_splat( loadv.v, 0 );
1069 loadv.s[0] = 1<<(denom - 1);
1070 roundv = vec_splat( loadv.v, 0 );
1072 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1074 VEC_LOAD( src, srcv, 16, vec_u8_t, src );
1075 weight_hv = vec_u8_to_s16_h( srcv );
1076 weight_lv = vec_u8_to_s16_l( srcv );
1078 weight_hv = vec_mladd( weight_hv, scalev, roundv );
1079 weight_lv = vec_mladd( weight_lv, scalev, roundv );
1080 weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
1081 weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
1082 weight_hv = vec_add( weight_hv, offsetv );
1083 weight_lv = vec_add( weight_lv, offsetv );
1085 srcv = vec_packsu( weight_hv, weight_lv );
1086 vec_st( srcv, 0, dst );
/* denom == 0 path. */
1091 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1093 VEC_LOAD( src, srcv, 16, vec_u8_t, src );
1094 weight_hv = vec_u8_to_s16_h( srcv );
1095 weight_lv = vec_u8_to_s16_l( srcv );
1097 weight_hv = vec_mladd( weight_hv, scalev, offsetv );
1098 weight_lv = vec_mladd( weight_lv, scalev, offsetv );
1100 srcv = vec_packsu( weight_hv, weight_lv );
1101 vec_st( srcv, 0, dst );
/* Weighted prediction, width 20: a full 16-byte vector (high/low s16
 * halves) plus a 4-byte remainder carried in weight_3v.  The unaligned
 * 20-byte row is assembled from three vec_ld's using the permute vector
 * _src_ produced by PREP_LOAD_SRC.  The final 4 bytes are written with a
 * single vec_ste at offset 16. */
1105 static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
1106 const x264_weight_t *weight, int i_height )
1109 PREP_LOAD_SRC( src );
1110 vec_u8_t src_1v, src_2v, src_3v;
1111 vec_s16_t weight_lv, weight_hv, weight_3v;
1112 vec_s16_t scalev, offsetv, denomv, roundv;
1115 int denom = weight->i_denom;
1117 loadv.s[0] = weight->i_scale;
1118 scalev = vec_splat( loadv.v, 0 );
1120 loadv.s[0] = weight->i_offset;
1121 offsetv = vec_splat( loadv.v, 0 );
1126 denomv = vec_splat( loadv.v, 0 );
1128 loadv.s[0] = 1<<(denom - 1);
1129 roundv = vec_splat( loadv.v, 0 );
1131 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1133 src_1v = vec_ld( 0, src );
1134 src_2v = vec_ld( 16, src );
1135 src_3v = vec_ld( 19, src );
1136 src_1v = vec_perm( src_1v, src_2v, _src_ );
1137 src_3v = vec_perm( src_2v, src_3v, _src_ );
1138 weight_hv = vec_u8_to_s16_h( src_1v );
1139 weight_lv = vec_u8_to_s16_l( src_1v );
1140 weight_3v = vec_u8_to_s16_h( src_3v );
1142 weight_hv = vec_mladd( weight_hv, scalev, roundv );
1143 weight_lv = vec_mladd( weight_lv, scalev, roundv );
1144 weight_3v = vec_mladd( weight_3v, scalev, roundv );
1145 weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
1146 weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
1147 weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
1148 weight_hv = vec_add( weight_hv, offsetv );
1149 weight_lv = vec_add( weight_lv, offsetv );
1150 weight_3v = vec_add( weight_3v, offsetv );
1152 src_1v = vec_packsu( weight_hv, weight_lv );
1153 src_3v = vec_packsu( weight_3v, zero_s16v );
1154 vec_st( src_1v, 0, dst );
1155 vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
/* denom == 0 path. */
1160 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1162 src_1v = vec_ld( 0, src );
1163 src_2v = vec_ld( 16, src );
1164 src_3v = vec_ld( 19, src );
1165 src_1v = vec_perm( src_1v, src_2v, _src_ );
1166 src_3v = vec_perm( src_2v, src_3v, _src_ );
1167 weight_hv = vec_u8_to_s16_h( src_1v );
1168 weight_lv = vec_u8_to_s16_l( src_1v );
1169 weight_3v = vec_u8_to_s16_h( src_3v );
1171 weight_hv = vec_mladd( weight_hv, scalev, offsetv );
1172 weight_lv = vec_mladd( weight_lv, scalev, offsetv );
1173 weight_3v = vec_mladd( weight_3v, scalev, offsetv );
1175 src_1v = vec_packsu( weight_hv, weight_lv );
1176 src_3v = vec_packsu( weight_3v, zero_s16v );
1177 vec_st( src_1v, 0, dst );
1178 vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
/* Weight-function dispatch table indexed by i_width>>2 (2,4,8,12,16,20);
 * widths 12 and 16 share the w16 kernel. */
1183 static weight_fn_t x264_mc_weight_wtab_altivec[6] =
1185 mc_weight_w2_altivec,
1186 mc_weight_w4_altivec,
1187 mc_weight_w8_altivec,
1188 mc_weight_w16_altivec,
1189 mc_weight_w16_altivec,
1190 mc_weight_w20_altivec,
1193 #endif // !HIGH_BIT_DEPTH
/* Registers the AltiVec implementations in the MC function table.
 * NOTE(review): the "#if !HIGH_BIT_DEPTH" guard opening (matching the
 * #endif below) is elided in this excerpt — these kernels are 8-bit only. */
1195 void x264_mc_altivec_init( x264_mc_functions_t *pf )
1198 pf->mc_luma = mc_luma_altivec;
1199 pf->get_ref = get_ref_altivec;
1200 pf->mc_chroma = mc_chroma_altivec;
1202 pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
1203 pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;
1205 pf->hpel_filter = x264_hpel_filter_altivec;
1206 pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
1208 pf->weight = x264_mc_weight_wtab_altivec;
1209 #endif // !HIGH_BIT_DEPTH