1 /*****************************************************************************
2 * mc.c: ppc motion compensation
3 *****************************************************************************
4 * Copyright (C) 2003-2016 x264 project
6 * Authors: Eric Petit <eric.petit@lapsus.org>
7 * Guillaume Poirier <gpoirier@mplayerhq.hu>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 * This program is also available under a commercial proprietary license.
24 * For more information, contact us at licensing@x264.com.
25 *****************************************************************************/
27 #include "common/common.h"
29 #include "ppccommon.h"
/* Common prototype for width-specialized copy / interpolation kernels. */
typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
                         uint8_t *dst, intptr_t i_dst, int i_height );
/* 6-tap H.264 half-pel filter (1,-5,20,20,-5,1) along a stride of
 * i_pix_next bytes; returns the unrounded, unshifted sum. */
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
           pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
           pix[ 3*i_pix_next];
}
/* Same 6-tap filter specialized for a horizontal (stride 1) walk. */
static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
           pix[ 3];
}
/* Rounded pixel average of two 4-wide blocks, plain C (too narrow for
 * vectors).  Note: src2 advances by src1's stride, per x264 convention. */
static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst, intptr_t i_dst,
                                               uint8_t *src1, intptr_t i_src1,
                                               uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}
62 static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst, intptr_t i_dst,
63 uint8_t *src1, intptr_t i_src1,
64 uint8_t *src2, int i_height )
66 vec_u8_t src1v, src2v;
69 PREP_LOAD_SRC( src1 );
70 PREP_LOAD_SRC( src2 );
72 for( int y = 0; y < i_height; y++ )
74 VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
75 VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
76 src1v = vec_avg( src1v, src2v );
77 VEC_STORE8( src1v, dst );
85 static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst, intptr_t i_dst,
86 uint8_t *src1, intptr_t i_src1,
87 uint8_t *src2, int i_height )
89 vec_u8_t src1v, src2v;
91 PREP_LOAD_SRC( src1 );
92 PREP_LOAD_SRC( src2 );
94 for( int y = 0; y < i_height; y++ )
96 VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
97 VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
98 src1v = vec_avg( src1v, src2v );
99 vec_st(src1v, 0, dst);
/* Width 20 = one 16-wide vector pass plus a 4-wide scalar tail. */
static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst, intptr_t i_dst,
                                                uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
{
    x264_pixel_avg2_w16_altivec( dst,    i_dst, src1,    i_src1, src2,    i_height );
    x264_pixel_avg2_w4_altivec(  dst+16, i_dst, src1+16, i_src1, src2+16, i_height );
}
/* mc_copy: plain c — width-parameterized row copy. */
#define MC_COPY( name, a )                                     \
static void name( uint8_t *dst, intptr_t i_dst,                \
                  uint8_t *src, intptr_t i_src, int i_height ) \
{                                                              \
    int y;                                                     \
    for( y = 0; y < i_height; y++ )                            \
    {                                                          \
        memcpy( dst, src, a );                                 \
        src += i_src;                                          \
        dst += i_dst;                                          \
    }                                                          \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )
132 static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
133 uint8_t *src, intptr_t i_src, int i_height )
137 PREP_LOAD_SRC( src );
139 for( int y = 0; y < i_height; y++ )
141 VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
142 vec_st(cpyV, 0, dst);
150 static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
151 uint8_t *src, intptr_t i_src, int i_height )
153 for( int y = 0; y < i_height; ++y )
155 vec_u8_t cpyV = vec_ld( 0, src );
156 vec_st(cpyV, 0, dst);
164 static void mc_luma_altivec( uint8_t *dst, intptr_t i_dst_stride,
165 uint8_t *src[4], intptr_t i_src_stride,
167 int i_width, int i_height, const x264_weight_t *weight )
169 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
170 intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
171 uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
172 if( qpel_idx & 5 ) /* qpel interpolation needed */
174 uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
179 x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
182 x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
186 x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
188 if( weight->weightfn )
189 weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
191 else if( weight->weightfn )
192 weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
198 x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
201 x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
204 x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
212 static uint8_t *get_ref_altivec( uint8_t *dst, intptr_t *i_dst_stride,
213 uint8_t *src[4], intptr_t i_src_stride,
215 int i_width, int i_height, const x264_weight_t *weight )
217 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
218 intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
219 uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
220 if( qpel_idx & 5 ) /* qpel interpolation needed */
222 uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
226 x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
229 x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
234 x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
237 x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
240 if( weight->weightfn )
241 weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
244 else if( weight->weightfn )
246 weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
251 *i_dst_stride = i_src_stride;
/* 2xh chroma MC, plain C: 1/8-pel 2D bilinear filter over interleaved
 * U/V (NV12-style) samples; even bytes are U, odd bytes are V. */
static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                           uint8_t *src, intptr_t i_src_stride,
                           int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    /* bilinear weights; cA+cB+cC+cD == 64, hence the +32 >> 6 rounding */
    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;  /* *2: interleaved U/V */
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;

        src  += i_src_stride;
        srcp += i_src_stride;
        dstu += i_dst_stride;
        dstv += i_dst_stride;
    }
}
/* Endian-neutral vec_sld: shifts the (a,b) byte pair left by n bytes on BE;
 * mirrored operands/count give the equivalent lane behavior on LE. */
#ifdef WORDS_BIGENDIAN
#define VSLD(a,b,n) vec_sld(a,b,n)
#else
#define VSLD(a,b,n) vec_sld(b,a,16-n)
#endif
292 static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
293 uint8_t *src, intptr_t i_src_stride,
294 int mvx, int mvy, int i_height )
297 int d8x = mvx & 0x07;
298 int d8y = mvy & 0x07;
300 ALIGNED_16( uint16_t coeff[4] );
301 coeff[0] = (8-d8x)*(8-d8y);
302 coeff[1] = d8x *(8-d8y);
303 coeff[2] = (8-d8x)*d8y;
306 src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
307 srcp = &src[i_src_stride];
311 PREP_LOAD_SRC( src );
312 vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
313 vec_u8_t src2v_8, dstuv, dstvv;
314 vec_u16_t src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
315 vec_u16_t shiftv, k32v;
317 #ifdef WORDS_BIGENDIAN
318 static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
319 static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);
321 static const vec_u8_t perm0v = CV(0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12);
322 static const vec_u8_t perm1v = CV(2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14);
325 coeff0v = vec_ld( 0, coeff );
326 coeff3v = vec_splat( coeff0v, 3 );
327 coeff2v = vec_splat( coeff0v, 2 );
328 coeff1v = vec_splat( coeff0v, 1 );
329 coeff0v = vec_splat( coeff0v, 0 );
330 k32v = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
331 shiftv = vec_splat_u16( 6 );
333 VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
334 src2v_16 = vec_u8_to_u16( src2v_8 );
335 src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
337 for( int y = 0; y < i_height; y += 2 )
341 VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
342 src2v_16 = vec_u8_to_u16( src2v_8 );
343 src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
345 dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
346 dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
347 dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
348 dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
350 dstv16 = vec_sr( dstv16, shiftv );
352 dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
353 dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
354 vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
355 vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
357 srcp += i_src_stride;
358 dstu += i_dst_stride;
359 dstv += i_dst_stride;
363 VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
364 src2v_16 = vec_u8_to_u16( src2v_8 );
365 src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
367 dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
368 dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
369 dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
370 dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
372 dstv16 = vec_sr( dstv16, shiftv );
374 dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
375 dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
376 vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
377 vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
379 srcp += i_src_stride;
380 dstu += i_dst_stride;
381 dstv += i_dst_stride;
385 static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
386 uint8_t *src, intptr_t i_src_stride,
387 int mvx, int mvy, int i_height )
390 int d8x = mvx & 0x07;
391 int d8y = mvy & 0x07;
393 ALIGNED_16( uint16_t coeff[4] );
394 coeff[0] = (8-d8x)*(8-d8y);
395 coeff[1] = d8x *(8-d8y);
396 coeff[2] = (8-d8x)*d8y;
399 src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
400 srcp = &src[i_src_stride];
404 PREP_LOAD_SRC( src );
406 vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
407 vec_u8_t src0v_8, src1v_8, src2v_8, src3v_8;
408 vec_u8_t dstuv, dstvv;
409 vec_u16_t src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
410 vec_u16_t src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
411 vec_u16_t shiftv, k32v;
413 coeff0v = vec_ld( 0, coeff );
414 coeff3v = vec_splat( coeff0v, 3 );
415 coeff2v = vec_splat( coeff0v, 2 );
416 coeff1v = vec_splat( coeff0v, 1 );
417 coeff0v = vec_splat( coeff0v, 0 );
418 k32v = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
419 shiftv = vec_splat_u16( 6 );
421 #ifdef WORDS_BIGENDIAN
422 static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
423 static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);
425 static const vec_u8_t perm0v = CV(0,4,8,12,16,20,24,28,1,1,1,1,1,1,1,1);
426 static const vec_u8_t perm1v = CV(2,6,10,14,18,22,26,30,1,1,1,1,1,1,1,1);
429 VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
430 VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
431 src3v_8 = VSLD( src2v_8, src3v_8, 2 );
433 for( int y = 0; y < i_height; y += 2 )
437 VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
438 VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
440 src3v_8 = VSLD( src2v_8, src3v_8, 2 );
442 src0v_16h = vec_u8_to_u16_h( src0v_8 );
443 src0v_16l = vec_u8_to_u16_l( src0v_8 );
444 src1v_16h = vec_u8_to_u16_h( src1v_8 );
445 src1v_16l = vec_u8_to_u16_l( src1v_8 );
446 src2v_16h = vec_u8_to_u16_h( src2v_8 );
447 src2v_16l = vec_u8_to_u16_l( src2v_8 );
448 src3v_16h = vec_u8_to_u16_h( src3v_8 );
449 src3v_16l = vec_u8_to_u16_l( src3v_8 );
451 dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
452 dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
453 dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
454 dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
455 dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
456 dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
457 dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
458 dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
460 dstv_16h = vec_sr( dstv_16h, shiftv );
461 dstv_16l = vec_sr( dstv_16l, shiftv );
463 dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
464 dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
466 VEC_STORE8( dstuv, dstu );
467 VEC_STORE8( dstvv, dstv );
469 srcp += i_src_stride;
470 dstu += i_dst_stride;
471 dstv += i_dst_stride;
475 VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
476 VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
478 src3v_8 = VSLD( src2v_8, src3v_8, 2 );
480 src0v_16h = vec_u8_to_u16_h( src0v_8 );
481 src0v_16l = vec_u8_to_u16_l( src0v_8 );
482 src1v_16h = vec_u8_to_u16_h( src1v_8 );
483 src1v_16l = vec_u8_to_u16_l( src1v_8 );
484 src2v_16h = vec_u8_to_u16_h( src2v_8 );
485 src2v_16l = vec_u8_to_u16_l( src2v_8 );
486 src3v_16h = vec_u8_to_u16_h( src3v_8 );
487 src3v_16l = vec_u8_to_u16_l( src3v_8 );
489 dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
490 dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
491 dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
492 dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
493 dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
494 dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
495 dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
496 dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
498 dstv_16h = vec_sr( dstv_16h, shiftv );
499 dstv_16l = vec_sr( dstv_16l, shiftv );
501 dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
502 dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
504 VEC_STORE8( dstuv, dstu );
505 VEC_STORE8( dstvv, dstv );
507 srcp += i_src_stride;
508 dstu += i_dst_stride;
509 dstv += i_dst_stride;
/* Chroma MC dispatcher: pick the width-specialized kernel. */
static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
                               uint8_t *src, intptr_t i_src_stride,
                               int mvx, int mvy, int i_width, int i_height )
{
    if( i_width == 8 )
        mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else if( i_width == 4 )
        mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                               mvx, mvy, i_height );
    else
        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                       mvx, mvy, i_height );
}
/* First-pass 6-tap filter on 16-bit lanes: folds the symmetric taps and
 * leaves a-5*b+20*c (a = outer pair, b = middle pair, c = inner pair) in t1v. */
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );   /* (a-b) */          \
    t2v = vec_sub( t2v, t3v );   /* (b-c) */          \
    t2v = vec_sl(  t2v, twov );  /* (b-c)*4 */        \
    t1v = vec_sub( t1v, t2v );   /* a-5*b+4*c */      \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );   /* a-5*b+20*c */     \
}
/* Second-pass 6-tap filter: same taps as HPEL_FILTER_1 but computed with
 * intermediate >>2 shifts so 16-bit lanes don't overflow; result ≈ (a-5b+20c)/16. */
#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v )        \
{                                                            \
    t1v = vec_add( t1v, t6v );                               \
    t2v = vec_add( t2v, t5v );                               \
    t3v = vec_add( t3v, t4v );                               \
                                                             \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */                  \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */                \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */              \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */            \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */        \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}
/* One 16-pixel horizontal half-pel filter step: load 6 shifted copies of the
 * row, run HPEL_FILTER_1 on both 8-lane halves, round (+16 >> 5) and store. */
#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = VSLD( src1v, src6v, 1 );                         \
    src3v = VSLD( src1v, src6v, 2 );                         \
    src4v = VSLD( src1v, src6v, 3 );                         \
    src5v = VSLD( src1v, src6v, 4 );                         \
    src6v = VSLD( src1v, src6v, 5 );                         \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}
/* One 16-pixel vertical half-pel filter step: load the 6 neighboring rows,
 * filter both 8-lane halves, round (+16 >> 5) and store into dstv.
 * Leaves temp1v/temp4v holding the unrounded sums for the central filter. */
#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}
/* Central (diagonal) half-pel step: horizontally filter the vertically
 * filtered 16-bit sums held in tempav..tempdv, round (+32 >> 6) and store
 * one 16-pixel vector into dstc, offset one vector back (x-16). */
#define HPEL_FILTER_CENTRAL()                           \
{                                                       \
    temp1v = VSLD( tempav, tempbv, 12 );                \
    temp2v = VSLD( tempav, tempbv, 14 );                \
    temp3v = tempbv;                                    \
    temp4v = VSLD( tempbv, tempcv, 2 );                 \
    temp5v = VSLD( tempbv, tempcv, 4 );                 \
    temp6v = VSLD( tempbv, tempcv, 6 );                 \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest1v = vec_add( temp1v, thirtytwov );             \
    dest1v = vec_sra( dest1v, sixv );                   \
                                                        \
    temp1v = VSLD( tempbv, tempcv, 12 );                \
    temp2v = VSLD( tempbv, tempcv, 14 );                \
    temp3v = tempcv;                                    \
    temp4v = VSLD( tempcv, tempdv, 2 );                 \
    temp5v = VSLD( tempcv, tempdv, 4 );                 \
    temp6v = VSLD( tempcv, tempdv, 6 );                 \
                                                        \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                   temp4v, temp5v, temp6v );            \
                                                        \
    dest2v = vec_add( temp1v, thirtytwov );             \
    dest2v = vec_sra( dest2v, sixv );                   \
                                                        \
    destv = vec_packsu( dest1v, dest2v );               \
                                                        \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}
671 void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
672 intptr_t i_stride, int i_width, int i_height, int16_t *buf )
675 vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
676 vec_s16_t dest1v, dest2v;
677 vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
678 vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;
683 PREP_STORE16_DST( dsth );
686 vec_u16_t twov, fourv, fivev, sixv;
687 vec_s16_t sixteenv, thirtytwov;
691 twov = vec_splat( temp_u.v, 0 );
693 fourv = vec_splat( temp_u.v, 0 );
695 fivev = vec_splat( temp_u.v, 0 );
697 sixv = vec_splat( temp_u.v, 0 );
699 sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
701 thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );
703 for( int y = 0; y < i_height; y++ )
707 /* horizontal_filter */
708 HPEL_FILTER_HORIZONTAL();
710 /* vertical_filter */
711 HPEL_FILTER_VERTICAL();
716 tempcv = vec_splat( temp1v, 0 ); /* first only */
720 for( x = 16; x < i_width; x+=16 )
722 /* horizontal_filter */
723 HPEL_FILTER_HORIZONTAL();
725 /* vertical_filter */
726 HPEL_FILTER_VERTICAL();
735 HPEL_FILTER_CENTRAL();
738 /* Partial vertical filter */
739 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
740 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
741 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
742 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
743 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
744 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );
746 temp1v = vec_u8_to_s16_h( src1v );
747 temp2v = vec_u8_to_s16_h( src2v );
748 temp3v = vec_u8_to_s16_h( src3v );
749 temp4v = vec_u8_to_s16_h( src4v );
750 temp5v = vec_u8_to_s16_h( src5v );
751 temp6v = vec_u8_to_s16_h( src6v );
753 HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );
760 /* tempev is not used */
762 HPEL_FILTER_CENTRAL();
766 static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
767 intptr_t src_stride, intptr_t dst_stride, int width, int height )
770 int end = (width & 15);
771 vec_u8_t src0v, src1v, src2v;
772 vec_u8_t lv, hv, src1p1v;
773 vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
774 static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
775 #ifndef WORDS_BIGENDIAN
776 static const vec_u8_t inverse_bridge_shuffle_1 = CV(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F );
779 for( int y = 0; y < height; y++ )
782 uint8_t *src1 = src0+src_stride;
783 uint8_t *src2 = src1+src_stride;
785 src0v = vec_ld(0, src0);
786 src1v = vec_ld(0, src1);
787 src2v = vec_ld(0, src2);
789 avg0v = vec_avg(src0v, src1v);
790 avg1v = vec_avg(src1v, src2v);
792 for( x = 0; x < w; x++ )
794 lv = vec_ld(16*(x*2+1), src0);
795 src1v = vec_ld(16*(x*2+1), src1);
796 avghv = vec_avg(lv, src1v);
798 lv = vec_ld(16*(x*2+2), src0);
799 src1p1v = vec_ld(16*(x*2+2), src1);
800 avghp1v = vec_avg(lv, src1p1v);
802 avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
803 avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
805 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
806 #ifdef WORDS_BIGENDIAN
807 vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
809 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dsth);
814 hv = vec_ld(16*(x*2+1), src2);
815 avghv = vec_avg(src1v, hv);
817 hv = vec_ld(16*(x*2+2), src2);
818 avghp1v = vec_avg(src1p1v, hv);
820 avgleftv = vec_avg(VSLD(avg1v, avghv, 1), avg1v);
821 avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
823 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
824 #ifdef WORDS_BIGENDIAN
825 vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
827 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dstc);
835 lv = vec_ld(16*(x*2+1), src0);
836 src1v = vec_ld(16*(x*2+1), src1);
837 avghv = vec_avg(lv, src1v);
839 lv = vec_ld(16*(x*2+1), src2);
840 avghp1v = vec_avg(src1v, lv);
842 avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
843 avgrightv = vec_avg(VSLD(avg1v, avghp1v, 1), avg1v);
845 lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
846 #ifdef WORDS_BIGENDIAN
847 hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
849 hv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1);
852 vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
853 vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
854 vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
855 vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);
857 lv = vec_sld(lv, lv, 8);
858 hv = vec_sld(hv, hv, 8);
860 vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
861 vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
862 vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
863 vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
866 src0 += src_stride*2;
874 static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
875 const x264_weight_t *weight, int i_height )
879 PREP_LOAD_SRC( src );
882 vec_s16_t scalev, offsetv, denomv, roundv;
885 int denom = weight->i_denom;
887 loadv.s[0] = weight->i_scale;
888 scalev = vec_splat( loadv.v, 0 );
890 loadv.s[0] = weight->i_offset;
891 offsetv = vec_splat( loadv.v, 0 );
896 denomv = vec_splat( loadv.v, 0 );
898 loadv.s[0] = 1<<(denom - 1);
899 roundv = vec_splat( loadv.v, 0 );
901 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
903 VEC_LOAD( src, srcv, 2, vec_u8_t, src );
904 weightv = vec_u8_to_s16( srcv );
906 weightv = vec_mladd( weightv, scalev, roundv );
907 weightv = vec_sra( weightv, (vec_u16_t)denomv );
908 weightv = vec_add( weightv, offsetv );
910 srcv = vec_packsu( weightv, zero_s16v );
911 vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
916 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
918 VEC_LOAD( src, srcv, 2, vec_u8_t, src );
919 weightv = vec_u8_to_s16( srcv );
921 weightv = vec_mladd( weightv, scalev, offsetv );
923 srcv = vec_packsu( weightv, zero_s16v );
924 vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
928 static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
929 const x264_weight_t *weight, int i_height )
933 PREP_LOAD_SRC( src );
936 vec_s16_t scalev, offsetv, denomv, roundv;
939 int denom = weight->i_denom;
941 loadv.s[0] = weight->i_scale;
942 scalev = vec_splat( loadv.v, 0 );
944 loadv.s[0] = weight->i_offset;
945 offsetv = vec_splat( loadv.v, 0 );
950 denomv = vec_splat( loadv.v, 0 );
952 loadv.s[0] = 1<<(denom - 1);
953 roundv = vec_splat( loadv.v, 0 );
955 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
957 VEC_LOAD( src, srcv, 4, vec_u8_t, src );
958 weightv = vec_u8_to_s16( srcv );
960 weightv = vec_mladd( weightv, scalev, roundv );
961 weightv = vec_sra( weightv, (vec_u16_t)denomv );
962 weightv = vec_add( weightv, offsetv );
964 srcv = vec_packsu( weightv, zero_s16v );
965 vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
970 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
972 VEC_LOAD( src, srcv, 4, vec_u8_t, src );
973 weightv = vec_u8_to_s16( srcv );
975 weightv = vec_mladd( weightv, scalev, offsetv );
977 srcv = vec_packsu( weightv, zero_s16v );
978 vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
982 static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
983 const x264_weight_t *weight, int i_height )
987 PREP_LOAD_SRC( src );
991 vec_s16_t scalev, offsetv, denomv, roundv;
994 int denom = weight->i_denom;
996 loadv.s[0] = weight->i_scale;
997 scalev = vec_splat( loadv.v, 0 );
999 loadv.s[0] = weight->i_offset;
1000 offsetv = vec_splat( loadv.v, 0 );
1005 denomv = vec_splat( loadv.v, 0 );
1007 loadv.s[0] = 1<<(denom - 1);
1008 roundv = vec_splat( loadv.v, 0 );
1010 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1012 VEC_LOAD( src, srcv, 8, vec_u8_t, src );
1013 weightv = vec_u8_to_s16( srcv );
1015 weightv = vec_mladd( weightv, scalev, roundv );
1016 weightv = vec_sra( weightv, (vec_u16_t)denomv );
1017 weightv = vec_add( weightv, offsetv );
1019 srcv = vec_packsu( weightv, zero_s16v );
1020 VEC_STORE8( srcv, dst );
1025 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1027 VEC_LOAD( src, srcv, 8, vec_u8_t, src );
1028 weightv = vec_u8_to_s16( srcv );
1030 weightv = vec_mladd( weightv, scalev, offsetv );
1032 srcv = vec_packsu( weightv, zero_s16v );
1033 VEC_STORE8( srcv, dst );
1037 static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
1038 const x264_weight_t *weight, int i_height )
1042 PREP_LOAD_SRC( src );
1044 vec_s16_t weight_lv, weight_hv;
1045 vec_s16_t scalev, offsetv, denomv, roundv;
1048 int denom = weight->i_denom;
1050 loadv.s[0] = weight->i_scale;
1051 scalev = vec_splat( loadv.v, 0 );
1053 loadv.s[0] = weight->i_offset;
1054 offsetv = vec_splat( loadv.v, 0 );
1059 denomv = vec_splat( loadv.v, 0 );
1061 loadv.s[0] = 1<<(denom - 1);
1062 roundv = vec_splat( loadv.v, 0 );
1064 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1066 VEC_LOAD( src, srcv, 16, vec_u8_t, src );
1067 weight_hv = vec_u8_to_s16_h( srcv );
1068 weight_lv = vec_u8_to_s16_l( srcv );
1070 weight_hv = vec_mladd( weight_hv, scalev, roundv );
1071 weight_lv = vec_mladd( weight_lv, scalev, roundv );
1072 weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
1073 weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
1074 weight_hv = vec_add( weight_hv, offsetv );
1075 weight_lv = vec_add( weight_lv, offsetv );
1077 srcv = vec_packsu( weight_hv, weight_lv );
1078 vec_st( srcv, 0, dst );
1083 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1085 VEC_LOAD( src, srcv, 16, vec_u8_t, src );
1086 weight_hv = vec_u8_to_s16_h( srcv );
1087 weight_lv = vec_u8_to_s16_l( srcv );
1089 weight_hv = vec_mladd( weight_hv, scalev, offsetv );
1090 weight_lv = vec_mladd( weight_lv, scalev, offsetv );
1092 srcv = vec_packsu( weight_hv, weight_lv );
1093 vec_st( srcv, 0, dst );
1097 static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
1098 const x264_weight_t *weight, int i_height )
1101 PREP_LOAD_SRC( src );
1102 vec_u8_t src_1v, src_2v, src_3v;
1103 vec_s16_t weight_lv, weight_hv, weight_3v;
1104 vec_s16_t scalev, offsetv, denomv, roundv;
1107 int denom = weight->i_denom;
1109 loadv.s[0] = weight->i_scale;
1110 scalev = vec_splat( loadv.v, 0 );
1112 loadv.s[0] = weight->i_offset;
1113 offsetv = vec_splat( loadv.v, 0 );
1118 denomv = vec_splat( loadv.v, 0 );
1120 loadv.s[0] = 1<<(denom - 1);
1121 roundv = vec_splat( loadv.v, 0 );
1123 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1125 src_1v = vec_ld( 0, src );
1126 src_2v = vec_ld( 16, src );
1127 src_3v = vec_ld( 19, src );
1128 src_1v = vec_perm( src_1v, src_2v, _src_ );
1129 src_3v = vec_perm( src_2v, src_3v, _src_ );
1130 weight_hv = vec_u8_to_s16_h( src_1v );
1131 weight_lv = vec_u8_to_s16_l( src_1v );
1132 weight_3v = vec_u8_to_s16_h( src_3v );
1134 weight_hv = vec_mladd( weight_hv, scalev, roundv );
1135 weight_lv = vec_mladd( weight_lv, scalev, roundv );
1136 weight_3v = vec_mladd( weight_3v, scalev, roundv );
1137 weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
1138 weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
1139 weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
1140 weight_hv = vec_add( weight_hv, offsetv );
1141 weight_lv = vec_add( weight_lv, offsetv );
1142 weight_3v = vec_add( weight_3v, offsetv );
1144 src_1v = vec_packsu( weight_hv, weight_lv );
1145 src_3v = vec_packsu( weight_3v, zero_s16v );
1146 vec_st( src_1v, 0, dst );
1147 vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
1152 for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
1154 src_1v = vec_ld( 0, src );
1155 src_2v = vec_ld( 16, src );
1156 src_3v = vec_ld( 19, src );
1157 src_1v = vec_perm( src_1v, src_2v, _src_ );
1158 src_3v = vec_perm( src_2v, src_3v, _src_ );
1159 weight_hv = vec_u8_to_s16_h( src_1v );
1160 weight_lv = vec_u8_to_s16_l( src_1v );
1161 weight_3v = vec_u8_to_s16_h( src_3v );
1163 weight_hv = vec_mladd( weight_hv, scalev, offsetv );
1164 weight_lv = vec_mladd( weight_lv, scalev, offsetv );
1165 weight_3v = vec_mladd( weight_3v, scalev, offsetv );
1167 src_1v = vec_packsu( weight_hv, weight_lv );
1168 src_3v = vec_packsu( weight_3v, zero_s16v );
1169 vec_st( src_1v, 0, dst );
1170 vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
1175 static weight_fn_t x264_mc_weight_wtab_altivec[6] =
1177 mc_weight_w2_altivec,
1178 mc_weight_w4_altivec,
1179 mc_weight_w8_altivec,
1180 mc_weight_w16_altivec,
1181 mc_weight_w16_altivec,
1182 mc_weight_w20_altivec,
1185 #endif // !HIGH_BIT_DEPTH
1187 void x264_mc_altivec_init( x264_mc_functions_t *pf )
1190 pf->mc_luma = mc_luma_altivec;
1191 pf->get_ref = get_ref_altivec;
1192 pf->mc_chroma = mc_chroma_altivec;
1194 pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
1195 pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;
1197 pf->hpel_filter = x264_hpel_filter_altivec;
1198 pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
1200 pf->weight = x264_mc_weight_wtab_altivec;
1201 #endif // !HIGH_BIT_DEPTH