1 /*****************************************************************************
2 * mc.c: h264 encoder library (Motion Compensation)
3 *****************************************************************************
4 * Copyright (C) 2003-2008 x264 project
6 * Authors: Eric Petit <eric.petit@lapsus.org>
7 * Guillaume Poirier <gpoirier@mplayerhq.hu>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
22 *****************************************************************************/
31 #include "common/common.h"
32 #include "common/mc.h"
34 #include "ppccommon.h"
36 #if !X264_HIGH_BIT_DEPTH
/* Signature of a simple block motion-compensation primitive: processes an
 * i_height-row block from src (stride i_src) into dst (stride i_dst). */
typedef void (*pf_mc_t)( uint8_t *src, int i_src,
                         uint8_t *dst, int i_dst, int i_height );

/* For each quarter-pel position qpel_idx = ((mvy&3)<<2) + (mvx&3), these
 * tables pick which of the four planes in src[4] (0 = full-pel, 1 = hpel-H,
 * 2 = hpel-V, 3 = hpel-HV) are averaged to form the qpel prediction. */
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
/* 6-tap H.264 half-pel interpolation filter (1,-5,20,20,-5,1) applied at
 * `pix` along direction `i_pix_next` (1 = horizontal, stride = vertical).
 * Returns the unnormalized filtered value (caller rounds/shifts).
 * Fix: the visible code was truncated and dropped the final +pix[3*stride]
 * tap; restored so all six taps are summed. */
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] +
           pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
           pix[ 3*i_pix_next];
}
/* Same 6-tap (1,-5,20,20,-5,1) filter as x264_tapfilter, specialized for a
 * horizontal stride of 1. Returns the unnormalized sum.
 * Fix: restored the truncated final +pix[3] tap and the function body. */
static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
           pix[ 3];
}
/* Rounded average of two 4-wide pixel rows: dst = (src1 + src2 + 1) >> 1,
 * for i_height rows. Both sources advance by i_src1 per row (the callers
 * always pass same-stride planes of the same frame).
 * Fix: restored the per-row pointer advances that were truncated — without
 * them every row overwrote dst row 0 from src row 0. */
static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst, int i_dst,
                                               uint8_t *src1, int i_src1,
                                               uint8_t *src2, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < 4; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;

        dst  += i_dst;
        src1 += i_src1;
        src2 += i_src1;
    }
}
72 static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst, int i_dst,
73 uint8_t *src1, int i_src1,
74 uint8_t *src2, int i_height )
76 vec_u8_t src1v, src2v;
79 PREP_LOAD_SRC( src1 );
80 PREP_LOAD_SRC( src2 );
82 for( int y = 0; y < i_height; y++ )
84 VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
85 VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
86 src1v = vec_avg( src1v, src2v );
87 VEC_STORE8( src1v, dst );
95 static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst, int i_dst,
96 uint8_t *src1, int i_src1,
97 uint8_t *src2, int i_height )
99 vec_u8_t src1v, src2v;
101 PREP_LOAD_SRC( src1 );
102 PREP_LOAD_SRC( src2 );
104 for( int y = 0; y < i_height; y++ )
106 VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
107 VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
108 src1v = vec_avg( src1v, src2v );
109 vec_st(src1v, 0, dst);
117 static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst, int i_dst,
118 uint8_t *src1, int i_src1,
119 uint8_t *src2, int i_height )
121 x264_pixel_avg2_w16_altivec(dst, i_dst, src1, i_src1, src2, i_height);
122 x264_pixel_avg2_w4_altivec(dst+16, i_dst, src1+16, i_src1, src2+16, i_height);
125 /* mc_copy: plain c */
/* Generates a plain-C block copy: `a` bytes per row, i_height rows.
 * Fix: restored the truncated `int y;` declaration, per-row pointer
 * advances and braces — without the advances every row copied row 0. */
#define MC_COPY( name, a )                                \
static void name( uint8_t *dst, int i_dst,                \
                  uint8_t *src, int i_src, int i_height ) \
{                                                         \
    int y;                                                \
    for( y = 0; y < i_height; y++ )                       \
    {                                                     \
        memcpy( dst, src, a );                            \
        src += i_src;                                     \
        dst += i_dst;                                     \
    }                                                     \
}
MC_COPY( x264_mc_copy_w4_altivec,  4  )
MC_COPY( x264_mc_copy_w8_altivec,  8  )
142 static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
143 uint8_t *src, int i_src, int i_height )
147 PREP_LOAD_SRC( src );
149 for( int y = 0; y < i_height; y++)
151 VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
152 vec_st(cpyV, 0, dst);
160 static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, int i_dst,
161 uint8_t *src, int i_src, int i_height )
163 for( int y = 0; y < i_height; ++y)
165 vec_u8_t cpyV = vec_ld( 0, src);
166 vec_st(cpyV, 0, dst);
174 static void mc_luma_altivec( uint8_t *dst, int i_dst_stride,
175 uint8_t *src[4], int i_src_stride,
177 int i_width, int i_height, const x264_weight_t *weight )
179 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
180 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
181 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
182 if( qpel_idx & 5 ) /* qpel interpolation needed */
184 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
189 x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
192 x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
196 x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
198 if( weight->weightfn )
199 weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
201 else if( weight->weightfn )
202 weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
208 x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
211 x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
214 x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
222 static uint8_t *get_ref_altivec( uint8_t *dst, int *i_dst_stride,
223 uint8_t *src[4], int i_src_stride,
225 int i_width, int i_height, const x264_weight_t *weight )
227 int qpel_idx = ((mvy&3)<<2) + (mvx&3);
228 int offset = (mvy>>2)*i_src_stride + (mvx>>2);
229 uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
230 if( qpel_idx & 5 ) /* qpel interpolation needed */
232 uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
236 x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
239 x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
244 x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
247 x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
250 if( weight->weightfn )
251 weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
254 else if( weight->weightfn )
256 weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
261 *i_dst_stride = i_src_stride;
/* Plain-C chroma MC for 2-wide blocks: bilinear interpolation at 1/8-pel
 * precision. The four corner weights cA..cD sum to 64; results are rounded
 * with +32 and shifted by 6.
 * Fix: restored the truncated mvx/mvy/i_height parameters, the srcp/d8x/
 * d8y/cD setup, per-row pointer advances and braces. */
static void mc_chroma_2xh( uint8_t *dst, int i_dst_stride,
                           uint8_t *src, int i_src_stride,
                           int mvx, int mvy, int i_height )
{
    uint8_t *srcp;
    int d8x = mvx & 0x07;
    int d8y = mvy & 0x07;

    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        dst[0] = ( cA*src[0] + cB*src[0+1] + cC*srcp[0] + cD*srcp[0+1] + 32 ) >> 6;
        dst[1] = ( cA*src[1] + cB*src[1+1] + cC*srcp[1] + cD*srcp[1+1] + 32 ) >> 6;

        src  += i_src_stride;
        dst  += i_dst_stride;
        srcp += i_src_stride;
    }
}
/* Accumulate tap `a` of the bilinear chroma filter: multiply the unpacked
 * 16-bit source vectors (rows A and B) by coefficient vector coeff<a>v and
 * add into the running sums dstv_16A/dstv_16B. */
#define DO_PROCESS_W4( a ) \
dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A ); \
dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )
299 static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
300 uint8_t *src, int i_src_stride,
305 int d8x = mvx & 0x07;
306 int d8y = mvy & 0x07;
308 ALIGNED_16( uint16_t coeff[4] );
309 coeff[0] = (8-d8x)*(8-d8y);
310 coeff[1] = d8x *(8-d8y);
311 coeff[2] = (8-d8x)*d8y;
314 src += (mvy >> 3) * i_src_stride + (mvx >> 3);
315 srcp = &src[i_src_stride];
319 PREP_LOAD_SRC( src );
320 vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
321 vec_u8_t src2v_8A, dstv_8A;
322 vec_u8_t src2v_8B, dstv_8B;
323 vec_u16_t src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
324 vec_u16_t src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
325 vec_u16_t shiftv, k32v;
327 coeff0v = vec_ld( 0, coeff );
328 coeff3v = vec_splat( coeff0v, 3 );
329 coeff2v = vec_splat( coeff0v, 2 );
330 coeff1v = vec_splat( coeff0v, 1 );
331 coeff0v = vec_splat( coeff0v, 0 );
332 k32v = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
333 shiftv = vec_splat_u16( 6 );
335 VEC_LOAD( src, src2v_8B, 5, vec_u8_t, src );
336 src2v_16B = vec_u8_to_u16( src2v_8B );
337 src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );
339 for( int y = 0; y < i_height; y += 2 )
341 src0v_16A = src2v_16B;
342 src1v_16A = src3v_16B;
344 VEC_LOAD_G( srcp, src2v_8A, 5, vec_u8_t );
345 srcp += i_src_stride;
346 VEC_LOAD_G( srcp, src2v_8B, 5, vec_u8_t );
347 srcp += i_src_stride;
348 src2v_16A = vec_u8_to_u16( src2v_8A );
349 src2v_16B = vec_u8_to_u16( src2v_8B );
350 src3v_16A = vec_sld( src2v_16A, src2v_16A, 2 );
351 src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );
353 src0v_16B = src2v_16A;
354 src1v_16B = src3v_16A;
356 dstv_16A = dstv_16B = k32v;
362 dstv_16A = vec_sr( dstv_16A, shiftv );
363 dstv_16B = vec_sr( dstv_16B, shiftv );
364 dstv_8A = vec_u16_to_u8( dstv_16A );
365 dstv_8B = vec_u16_to_u8( dstv_16B );
366 vec_ste( vec_splat( (vec_u32_t) dstv_8A, 0 ), 0, (uint32_t*) dst );
368 vec_ste( vec_splat( (vec_u32_t) dstv_8B, 0 ), 0, (uint32_t*) dst );
/* Accumulate tap `a` for the 8-wide chroma filter: widen the byte source
 * vectors (rows A and B) to 16-bit, multiply by coeff<a>v and add into the
 * running sums dstv_16A/dstv_16B. */
#define DO_PROCESS_W8( a ) \
src##a##v_16A = vec_u8_to_u16( src##a##v_8A ); \
src##a##v_16B = vec_u8_to_u16( src##a##v_8B ); \
dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A ); \
dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )
379 static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
380 uint8_t *src, int i_src_stride,
385 int d8x = mvx & 0x07;
386 int d8y = mvy & 0x07;
388 ALIGNED_16( uint16_t coeff[4] );
389 coeff[0] = (8-d8x)*(8-d8y);
390 coeff[1] = d8x *(8-d8y);
391 coeff[2] = (8-d8x)*d8y;
394 src += (mvy >> 3) * i_src_stride + (mvx >> 3);
395 srcp = &src[i_src_stride];
399 PREP_LOAD_SRC( src );
401 vec_u16_t coeff0v, coeff1v, coeff2v, coeff3v;
402 vec_u8_t src0v_8A, src1v_8A, src2v_8A, src3v_8A, dstv_8A;
403 vec_u8_t src0v_8B, src1v_8B, src2v_8B, src3v_8B, dstv_8B;
404 vec_u16_t src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
405 vec_u16_t src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
406 vec_u16_t shiftv, k32v;
408 coeff0v = vec_ld( 0, coeff );
409 coeff3v = vec_splat( coeff0v, 3 );
410 coeff2v = vec_splat( coeff0v, 2 );
411 coeff1v = vec_splat( coeff0v, 1 );
412 coeff0v = vec_splat( coeff0v, 0 );
413 k32v = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
414 shiftv = vec_splat_u16( 6 );
416 VEC_LOAD( src, src2v_8B, 9, vec_u8_t, src );
417 src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );
419 for( int y = 0; y < i_height; y+=2 )
424 VEC_LOAD_G( srcp, src2v_8A, 9, vec_u8_t );
425 srcp += i_src_stride;
426 VEC_LOAD_G( srcp, src2v_8B, 9, vec_u8_t );
427 srcp += i_src_stride;
428 src3v_8A = vec_sld( src2v_8A, src2v_8A, 1 );
429 src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );
433 dstv_16A = dstv_16B = k32v;
439 dstv_16A = vec_sr( dstv_16A, shiftv );
440 dstv_16B = vec_sr( dstv_16B, shiftv );
441 dstv_8A = vec_u16_to_u8( dstv_16A );
442 dstv_8B = vec_u16_to_u8( dstv_16B );
443 VEC_STORE8( dstv_8A, dst );
445 VEC_STORE8( dstv_8B, dst );
450 static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
451 uint8_t *src, int i_src_stride,
453 int i_width, int i_height )
456 mc_chroma_altivec_8xh( dst, i_dst_stride, src, i_src_stride,
457 mvx, mvy, i_height );
458 else if( i_width == 4 )
459 mc_chroma_altivec_4xh( dst, i_dst_stride, src, i_src_stride,
460 mvx, mvy, i_height );
462 mc_chroma_2xh( dst, i_dst_stride, src, i_src_stride,
463 mvx, mvy, i_height );
/* Vector 6-tap half-pel filter, pass 1: given the six taps t1..t6 folds
 * them into t1 = a - 5*b + 20*c where a=t1+t6, b=t2+t5, c=t3+t4
 * (the (1,-5,20,20,-5,1) kernel, unnormalized).
 * Fix: restored the truncated surrounding braces. */
#define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t2v = vec_sub( t2v, t3v );  /* (b-c) */           \
    t2v = vec_sl(  t2v, twov ); /* (b-c)*4 */         \
    t1v = vec_sub( t1v, t2v );  /* a-5*b+4*c */       \
    t3v = vec_sl(  t3v, fourv ); /* 16*c */           \
    t1v = vec_add( t1v, t3v );  /* a-5*b+20*c */      \
}
/* Vector 6-tap filter, pass 2 (central position): computes
 * (a-5*b+20*c)/16 using shift-and-add so the 16-bit intermediates
 * cannot overflow.
 * Fix: restored the truncated surrounding braces. */
#define HPEL_FILTER_2( t1v, t2v, t3v, t4v, t5v, t6v ) \
{                                                     \
    t1v = vec_add( t1v, t6v );                        \
    t2v = vec_add( t2v, t5v );                        \
    t3v = vec_add( t3v, t4v );                        \
                                                      \
    t1v = vec_sub( t1v, t2v );  /* (a-b) */           \
    t1v = vec_sra( t1v, twov ); /* (a-b)/4 */         \
    t1v = vec_sub( t1v, t2v );  /* (a-b)/4-b */       \
    t1v = vec_add( t1v, t3v );  /* (a-b)/4-b+c */     \
    t1v = vec_sra( t1v, twov ); /* ((a-b)/4-b+c)/4 */ \
    t1v = vec_add( t1v, t3v );  /* ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16 */ \
}
/* Horizontal half-pel filter for one 16-pixel span: loads pixels x-2..x+19,
 * builds the six shifted tap vectors with vec_sld, filters the high and low
 * halves (widened to s16), rounds (+16, >>5), saturates and stores to dsth.
 * Fix: restored the truncated braces and continuation lines. */
#define HPEL_FILTER_HORIZONTAL()                             \
{                                                            \
    VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
    VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                             \
    src2v = vec_sld( src1v, src6v,  1 );                     \
    src3v = vec_sld( src1v, src6v,  2 );                     \
    src4v = vec_sld( src1v, src6v,  3 );                     \
    src5v = vec_sld( src1v, src6v,  4 );                     \
    src6v = vec_sld( src1v, src6v,  5 );                     \
                                                             \
    temp1v = vec_u8_to_s16_h( src1v );                       \
    temp2v = vec_u8_to_s16_h( src2v );                       \
    temp3v = vec_u8_to_s16_h( src3v );                       \
    temp4v = vec_u8_to_s16_h( src4v );                       \
    temp5v = vec_u8_to_s16_h( src5v );                       \
    temp6v = vec_u8_to_s16_h( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest1v = vec_add( temp1v, sixteenv );                    \
    dest1v = vec_sra( dest1v, fivev );                       \
                                                             \
    temp1v = vec_u8_to_s16_l( src1v );                       \
    temp2v = vec_u8_to_s16_l( src2v );                       \
    temp3v = vec_u8_to_s16_l( src3v );                       \
    temp4v = vec_u8_to_s16_l( src4v );                       \
    temp5v = vec_u8_to_s16_l( src5v );                       \
    temp6v = vec_u8_to_s16_l( src6v );                       \
                                                             \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                   \
                   temp4v, temp5v, temp6v );                 \
                                                             \
    dest2v = vec_add( temp1v, sixteenv );                    \
    dest2v = vec_sra( dest2v, fivev );                       \
                                                             \
    destv = vec_packsu( dest1v, dest2v );                    \
                                                             \
    VEC_STORE16( destv, &dsth[x+i_stride*y], dsth );         \
}
/* Vertical half-pel filter for one 16-pixel span: loads rows y-2..y+3,
 * filters the high halves into dest1v and the low halves into dest2v
 * (kept in temp4v..temp9v so temp1v..temp3v survive for the central
 * filter), rounds (+16, >>5), saturates and stores to dstv.
 * Fix: restored the truncated braces and continuation lines. */
#define HPEL_FILTER_VERTICAL()                                    \
{                                                                 \
    VEC_LOAD( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src ); \
    VEC_LOAD( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src ); \
                                                                  \
    temp1v = vec_u8_to_s16_h( src1v );                            \
    temp2v = vec_u8_to_s16_h( src2v );                            \
    temp3v = vec_u8_to_s16_h( src3v );                            \
    temp4v = vec_u8_to_s16_h( src4v );                            \
    temp5v = vec_u8_to_s16_h( src5v );                            \
    temp6v = vec_u8_to_s16_h( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp1v, temp2v, temp3v,                        \
                   temp4v, temp5v, temp6v );                      \
                                                                  \
    dest1v = vec_add( temp1v, sixteenv );                         \
    dest1v = vec_sra( dest1v, fivev );                            \
                                                                  \
    temp4v = vec_u8_to_s16_l( src1v );                            \
    temp5v = vec_u8_to_s16_l( src2v );                            \
    temp6v = vec_u8_to_s16_l( src3v );                            \
    temp7v = vec_u8_to_s16_l( src4v );                            \
    temp8v = vec_u8_to_s16_l( src5v );                            \
    temp9v = vec_u8_to_s16_l( src6v );                            \
                                                                  \
    HPEL_FILTER_1( temp4v, temp5v, temp6v,                        \
                   temp7v, temp8v, temp9v );                      \
                                                                  \
    dest2v = vec_add( temp4v, sixteenv );                         \
    dest2v = vec_sra( dest2v, fivev );                            \
                                                                  \
    destv = vec_packsu( dest1v, dest2v );                         \
                                                                  \
    VEC_STORE16( destv, &dstv[x+i_stride*y], dsth );              \
}
/* Central (diagonal) half-pel filter: applies the horizontal 6-tap kernel
 * to the vertically-filtered intermediates carried in tempav..tempdv,
 * rounds (+32, >>6), saturates and stores to dstc[x-16..] (one span
 * behind, since taps from the next span are needed).
 * Fix: restored the truncated `temp3v = tempbv;` / `temp3v = tempcv;`
 * center-tap assignments and the surrounding braces — without them the
 * filter read a stale center tap. */
#define HPEL_FILTER_CENTRAL()                     \
{                                                 \
    temp1v = vec_sld( tempav, tempbv, 12 );       \
    temp2v = vec_sld( tempav, tempbv, 14 );       \
    temp3v = tempbv;                              \
    temp4v = vec_sld( tempbv, tempcv,  2 );       \
    temp5v = vec_sld( tempbv, tempcv,  4 );       \
    temp6v = vec_sld( tempbv, tempcv,  6 );       \
                                                  \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,        \
                   temp4v, temp5v, temp6v );      \
                                                  \
    dest1v = vec_add( temp1v, thirtytwov );       \
    dest1v = vec_sra( dest1v, sixv );             \
                                                  \
    temp1v = vec_sld( tempbv, tempcv, 12 );       \
    temp2v = vec_sld( tempbv, tempcv, 14 );       \
    temp3v = tempcv;                              \
    temp4v = vec_sld( tempcv, tempdv,  2 );       \
    temp5v = vec_sld( tempcv, tempdv,  4 );       \
    temp6v = vec_sld( tempcv, tempdv,  6 );       \
                                                  \
    HPEL_FILTER_2( temp1v, temp2v, temp3v,        \
                   temp4v, temp5v, temp6v );      \
                                                  \
    dest2v = vec_add( temp1v, thirtytwov );       \
    dest2v = vec_sra( dest2v, sixv );             \
                                                  \
    destv = vec_packsu( dest1v, dest2v );         \
                                                  \
    VEC_STORE16( destv, &dstc[x-16+i_stride*y], dsth ); \
}
609 void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
610 int i_stride, int i_width, int i_height, int16_t *buf )
613 vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
614 vec_s16_t dest1v, dest2v;
615 vec_s16_t temp1v, temp2v, temp3v, temp4v, temp5v, temp6v, temp7v, temp8v, temp9v;
616 vec_s16_t tempav, tempbv, tempcv, tempdv, tempev;
621 PREP_STORE16_DST( dsth );
624 vec_u16_t twov, fourv, fivev, sixv;
625 vec_s16_t sixteenv, thirtytwov;
629 twov = vec_splat( temp_u.v, 0 );
631 fourv = vec_splat( temp_u.v, 0 );
633 fivev = vec_splat( temp_u.v, 0 );
635 sixv = vec_splat( temp_u.v, 0 );
637 sixteenv = (vec_s16_t)vec_splat( temp_u.v, 0 );
639 thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );
641 for( int y = 0; y < i_height; y++ )
645 /* horizontal_filter */
646 HPEL_FILTER_HORIZONTAL();
648 /* vertical_filter */
649 HPEL_FILTER_VERTICAL();
654 tempcv = vec_splat( temp1v, 0 ); /* first only */
658 for( x = 16; x < i_width; x+=16 )
660 /* horizontal_filter */
661 HPEL_FILTER_HORIZONTAL();
663 /* vertical_filter */
664 HPEL_FILTER_VERTICAL();
673 HPEL_FILTER_CENTRAL();
676 /* Partial vertical filter */
677 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-2)], src1v, 16, vec_u8_t, src );
678 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-1)], src2v, 16, vec_u8_t, src );
679 VEC_LOAD_PARTIAL( &src[x+i_stride*(y-0)], src3v, 16, vec_u8_t, src );
680 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+1)], src4v, 16, vec_u8_t, src );
681 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+2)], src5v, 16, vec_u8_t, src );
682 VEC_LOAD_PARTIAL( &src[x+i_stride*(y+3)], src6v, 16, vec_u8_t, src );
684 temp1v = vec_u8_to_s16_h( src1v );
685 temp2v = vec_u8_to_s16_h( src2v );
686 temp3v = vec_u8_to_s16_h( src3v );
687 temp4v = vec_u8_to_s16_h( src4v );
688 temp5v = vec_u8_to_s16_h( src5v );
689 temp6v = vec_u8_to_s16_h( src6v );
691 HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );
698 /* tempev is not used */
700 HPEL_FILTER_CENTRAL();
704 static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
705 int src_stride, int dst_stride, int width, int height )
708 int end = (width & 15);
709 vec_u8_t src0v, src1v, src2v;
710 vec_u8_t lv, hv, src1p1v;
711 vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
712 static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
714 for( int y = 0; y < height; y++ )
717 uint8_t *src1 = src0+src_stride;
718 uint8_t *src2 = src1+src_stride;
720 src0v = vec_ld(0, src0);
721 src1v = vec_ld(0, src1);
722 src2v = vec_ld(0, src2);
724 avg0v = vec_avg(src0v, src1v);
725 avg1v = vec_avg(src1v, src2v);
727 for( x = 0; x < w; x++ )
729 lv = vec_ld(16*(x*2+1), src0);
730 src1v = vec_ld(16*(x*2+1), src1);
731 avghv = vec_avg(lv, src1v);
733 lv = vec_ld(16*(x*2+2), src0);
734 src1p1v = vec_ld(16*(x*2+2), src1);
735 avghp1v = vec_avg(lv, src1p1v);
737 avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
738 avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
740 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
741 vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
745 hv = vec_ld(16*(x*2+1), src2);
746 avghv = vec_avg(src1v, hv);
748 hv = vec_ld(16*(x*2+2), src2);
749 avghp1v = vec_avg(src1p1v, hv);
751 avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
752 avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
754 vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
755 vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
762 lv = vec_ld(16*(x*2+1), src0);
763 src1v = vec_ld(16*(x*2+1), src1);
764 avghv = vec_avg(lv, src1v);
766 lv = vec_ld(16*(x*2+1), src2);
767 avghp1v = vec_avg(src1v, lv);
769 avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
770 avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);
772 lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
773 hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
775 vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
776 vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
777 vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
778 vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);
780 lv = vec_sld(lv, lv, 8);
781 hv = vec_sld(hv, hv, 8);
783 vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
784 vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
785 vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
786 vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
789 src0 += src_stride*2;
796 #endif // !X264_HIGH_BIT_DEPTH
798 void x264_mc_altivec_init( x264_mc_functions_t *pf )
800 #if !X264_HIGH_BIT_DEPTH
801 pf->mc_luma = mc_luma_altivec;
802 pf->get_ref = get_ref_altivec;
803 // pf->mc_chroma = mc_chroma_altivec;
805 pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
806 pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;
808 pf->hpel_filter = x264_hpel_filter_altivec;
809 pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
810 #endif // !X264_HIGH_BIT_DEPTH