+/* Build a half-resolution plane (dst0) and three half-pel-offset variants
+ * (dsth: +1 input pixel horizontally, dstv: +1 input row vertically,
+ * dstc: both) from src0 by averaging 2x2 input neighborhoods.
+ * width/height are output-plane dimensions; each inner-loop iteration
+ * consumes 32 input pixels from each of three rows and emits 16 output
+ * pixels to each of the four planes. */
+static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
+ intptr_t src_stride, intptr_t dst_stride, int width, int height )
+{
+ int w = width >> 4;       /* full 16-output-pixel vector iterations per row */
+ int end = (width & 15);   /* leftover output pixels, handled by the tail block */
+ vec_u8_t src0v, src1v, src2v;
+ vec_u8_t lv, hv, src1p1v;
+ vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
+ /* Selects the even-indexed bytes of two concatenated vectors: decimates
+  * the 32 horizontally-averaged samples down to 16 output pixels. */
+ static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
+#ifndef WORDS_BIGENDIAN
+ /* Odd-indexed bytes (the half-pel horizontal samples); on big-endian the
+  * same selection is done with the vec_pack of u16 halves below instead. */
+ static const vec_u8_t inverse_bridge_shuffle_1 = CV(0x01, 0x03, 0x05, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F );
+#endif
+
+ for( int y = 0; y < height; y++ )
+ {
+ int x;
+ uint8_t *src1 = src0+src_stride;
+ uint8_t *src2 = src1+src_stride;
+
+ src0v = vec_ld(0, src0);
+ src1v = vec_ld(0, src1);
+ src2v = vec_ld(0, src2);
+
+ /* Vertical averages of input row pairs (0,1) and (1,2) for the first
+  * 16 input pixels of this row. */
+ avg0v = vec_avg(src0v, src1v);
+ avg1v = vec_avg(src1v, src2v);
+
+ for( x = 0; x < w; x++ )
+ {
+ /* Vertical averages of rows 0/1 for the next two 16-byte chunks. */
+ lv = vec_ld(16*(x*2+1), src0);
+ src1v = vec_ld(16*(x*2+1), src1);
+ avghv = vec_avg(lv, src1v);
+
+ lv = vec_ld(16*(x*2+2), src0);
+ src1p1v = vec_ld(16*(x*2+2), src1);
+ avghp1v = vec_avg(lv, src1p1v);
+
+ /* Horizontal pass: average each byte with its right neighbor
+  * (VSLD shifts in the first byte of the following vector). */
+ avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
+ avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
+
+ /* Even bytes -> dst0, odd bytes -> dsth (half-pel horizontal). */
+ vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
+#ifdef WORDS_BIGENDIAN
+ vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
+#else
+ vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dsth);
+#endif
+
+ avg0v = avghp1v;
+
+ /* Same computation on row pair (1,2), producing dstv/dstc. */
+ hv = vec_ld(16*(x*2+1), src2);
+ avghv = vec_avg(src1v, hv);
+
+ hv = vec_ld(16*(x*2+2), src2);
+ avghp1v = vec_avg(src1p1v, hv);
+
+ avgleftv = vec_avg(VSLD(avg1v, avghv, 1), avg1v);
+ avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
+
+ vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
+#ifdef WORDS_BIGENDIAN
+ vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
+#else
+ vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dstc);
+#endif
+
+ avg1v = avghp1v;
+
+ }
+ /* Tail for a non-multiple-of-16 width.  NOTE(review): this stores
+  * exactly 8 bytes per plane, so it assumes the leftover is at most 8
+  * output pixels -- confirm callers guarantee this. */
+ if( end )
+ {
+ lv = vec_ld(16*(x*2+1), src0);
+ src1v = vec_ld(16*(x*2+1), src1);
+ avghv = vec_avg(lv, src1v);
+
+ lv = vec_ld(16*(x*2+1), src2);
+ avghp1v = vec_avg(src1v, lv);
+
+ avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
+ avgrightv = vec_avg(VSLD(avg1v, avghp1v, 1), avg1v);
+
+ /* lv even bytes: low half = dst0 data, high half = dstv data.
+  * hv odd bytes:  low half = dsth data, high half = dstc data. */
+ lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
+#ifdef WORDS_BIGENDIAN
+ hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
+#else
+ hv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1);
+#endif
+
+ /* Two 4-byte element stores = 8 output bytes per destination. */
+ vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
+ vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
+ vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
+ vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);
+
+ /* Rotate the high halves down and store them to the vertical planes. */
+ lv = vec_sld(lv, lv, 8);
+ hv = vec_sld(hv, hv, 8);
+
+ vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
+ vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
+ vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
+ vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
+ }
+
+ /* Two input rows are consumed per output row. */
+ src0 += src_stride*2;
+ dst0 += dst_stride;
+ dsth += dst_stride;
+ dstv += dst_stride;
+ dstc += dst_stride;
+ }
+}
+
+/* Weighted prediction, 2-pixel-wide rows:
+ *   dst = clip255( ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset )
+ * or, when i_denom == 0:
+ *   dst = clip255( src * i_scale + i_offset )
+ * The scalar weight parameters are splatted across s16 vectors once, then
+ * each row is widened, multiply-added, shifted, offset, saturated and
+ * stored back as 2 bytes. */
+static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+ const x264_weight_t *weight, int i_height )
+{
+ LOAD_ZERO;
+ PREP_LOAD;
+ PREP_LOAD_SRC( src );
+ vec_u8_t srcv;
+ vec_s16_t weightv;
+ vec_s16_t scalev, offsetv, denomv, roundv;
+ vec_s16_u loadv;
+
+ int denom = weight->i_denom;
+
+ loadv.s[0] = weight->i_scale;
+ scalev = vec_splat( loadv.v, 0 );
+
+ loadv.s[0] = weight->i_offset;
+ offsetv = vec_splat( loadv.v, 0 );
+
+ if( denom >= 1 )
+ {
+ loadv.s[0] = denom;
+ denomv = vec_splat( loadv.v, 0 );
+
+ /* Rounding term for the >> denom shift below. */
+ loadv.s[0] = 1<<(denom - 1);
+ roundv = vec_splat( loadv.v, 0 );
+
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 2, vec_u8_t, src );
+ weightv = vec_u8_to_s16( srcv );
+
+ weightv = vec_mladd( weightv, scalev, roundv );
+ weightv = vec_sra( weightv, (vec_u16_t)denomv );
+ weightv = vec_add( weightv, offsetv );
+
+ /* Saturate to [0,255] and store exactly 2 bytes via a u16 element. */
+ srcv = vec_packsu( weightv, zero_s16v );
+ vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
+ }
+ }
+ else
+ {
+ /* denom == 0: no shift, fold the offset into the multiply-add. */
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 2, vec_u8_t, src );
+ weightv = vec_u8_to_s16( srcv );
+
+ weightv = vec_mladd( weightv, scalev, offsetv );
+
+ srcv = vec_packsu( weightv, zero_s16v );
+ vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
+ }
+ }
+}
+/* Weighted prediction, 4-pixel-wide rows; same arithmetic as the 2-wide
+ * kernel:
+ *   dst = clip255( ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset )
+ * with the no-shift variant when i_denom == 0.  Stores 4 bytes per row
+ * via a single u32 element store. */
+static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+ const x264_weight_t *weight, int i_height )
+{
+ LOAD_ZERO;
+ PREP_LOAD;
+ PREP_LOAD_SRC( src );
+ vec_u8_t srcv;
+ vec_s16_t weightv;
+ vec_s16_t scalev, offsetv, denomv, roundv;
+ vec_s16_u loadv;
+
+ int denom = weight->i_denom;
+
+ loadv.s[0] = weight->i_scale;
+ scalev = vec_splat( loadv.v, 0 );
+
+ loadv.s[0] = weight->i_offset;
+ offsetv = vec_splat( loadv.v, 0 );
+
+ if( denom >= 1 )
+ {
+ loadv.s[0] = denom;
+ denomv = vec_splat( loadv.v, 0 );
+
+ /* Rounding term for the >> denom shift below. */
+ loadv.s[0] = 1<<(denom - 1);
+ roundv = vec_splat( loadv.v, 0 );
+
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 4, vec_u8_t, src );
+ weightv = vec_u8_to_s16( srcv );
+
+ weightv = vec_mladd( weightv, scalev, roundv );
+ weightv = vec_sra( weightv, (vec_u16_t)denomv );
+ weightv = vec_add( weightv, offsetv );
+
+ /* Saturate to [0,255] and store exactly 4 bytes via a u32 element. */
+ srcv = vec_packsu( weightv, zero_s16v );
+ vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
+ }
+ }
+ else
+ {
+ /* denom == 0: no shift, fold the offset into the multiply-add. */
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 4, vec_u8_t, src );
+ weightv = vec_u8_to_s16( srcv );
+
+ weightv = vec_mladd( weightv, scalev, offsetv );
+
+ srcv = vec_packsu( weightv, zero_s16v );
+ vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
+ }
+ }
+}
+/* Weighted prediction, 8-pixel-wide rows; same arithmetic as the narrower
+ * kernels:
+ *   dst = clip255( ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset )
+ * with the no-shift variant when i_denom == 0.  Stores 8 bytes per row
+ * through the VEC_STORE8 helper (set up by PREP_STORE8). */
+static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+ const x264_weight_t *weight, int i_height )
+{
+ LOAD_ZERO;
+ PREP_LOAD;
+ PREP_LOAD_SRC( src );
+ PREP_STORE8;
+ vec_u8_t srcv;
+ vec_s16_t weightv;
+ vec_s16_t scalev, offsetv, denomv, roundv;
+ vec_s16_u loadv;
+
+ int denom = weight->i_denom;
+
+ loadv.s[0] = weight->i_scale;
+ scalev = vec_splat( loadv.v, 0 );
+
+ loadv.s[0] = weight->i_offset;
+ offsetv = vec_splat( loadv.v, 0 );
+
+ if( denom >= 1 )
+ {
+ loadv.s[0] = denom;
+ denomv = vec_splat( loadv.v, 0 );
+
+ /* Rounding term for the >> denom shift below. */
+ loadv.s[0] = 1<<(denom - 1);
+ roundv = vec_splat( loadv.v, 0 );
+
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 8, vec_u8_t, src );
+ weightv = vec_u8_to_s16( srcv );
+
+ weightv = vec_mladd( weightv, scalev, roundv );
+ weightv = vec_sra( weightv, (vec_u16_t)denomv );
+ weightv = vec_add( weightv, offsetv );
+
+ /* Saturate to [0,255]; low 8 bytes of srcv hold the row. */
+ srcv = vec_packsu( weightv, zero_s16v );
+ VEC_STORE8( srcv, dst );
+ }
+ }
+ else
+ {
+ /* denom == 0: no shift, fold the offset into the multiply-add. */
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 8, vec_u8_t, src );
+ weightv = vec_u8_to_s16( srcv );
+
+ weightv = vec_mladd( weightv, scalev, offsetv );
+
+ srcv = vec_packsu( weightv, zero_s16v );
+ VEC_STORE8( srcv, dst );
+ }
+ }
+}
+/* Weighted prediction, 16-pixel-wide rows; same arithmetic as the narrower
+ * kernels:
+ *   dst = clip255( ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset )
+ * with the no-shift variant when i_denom == 0.  The 16 source bytes are
+ * widened into high/low s16 halves, processed in parallel, and repacked
+ * into one aligned 16-byte store. */
+static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+ const x264_weight_t *weight, int i_height )
+{
+ LOAD_ZERO;
+ PREP_LOAD;
+ PREP_LOAD_SRC( src );
+ vec_u8_t srcv;
+ vec_s16_t weight_lv, weight_hv;
+ vec_s16_t scalev, offsetv, denomv, roundv;
+ vec_s16_u loadv;
+
+ int denom = weight->i_denom;
+
+ loadv.s[0] = weight->i_scale;
+ scalev = vec_splat( loadv.v, 0 );
+
+ loadv.s[0] = weight->i_offset;
+ offsetv = vec_splat( loadv.v, 0 );
+
+ if( denom >= 1 )
+ {
+ loadv.s[0] = denom;
+ denomv = vec_splat( loadv.v, 0 );
+
+ /* Rounding term for the >> denom shift below. */
+ loadv.s[0] = 1<<(denom - 1);
+ roundv = vec_splat( loadv.v, 0 );
+
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 16, vec_u8_t, src );
+ weight_hv = vec_u8_to_s16_h( srcv );
+ weight_lv = vec_u8_to_s16_l( srcv );
+
+ weight_hv = vec_mladd( weight_hv, scalev, roundv );
+ weight_lv = vec_mladd( weight_lv, scalev, roundv );
+ weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
+ weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
+ weight_hv = vec_add( weight_hv, offsetv );
+ weight_lv = vec_add( weight_lv, offsetv );
+
+ /* Saturate both halves to [0,255] and store all 16 bytes at once. */
+ srcv = vec_packsu( weight_hv, weight_lv );
+ vec_st( srcv, 0, dst );
+ }
+ }
+ else
+ {
+ /* denom == 0: no shift, fold the offset into the multiply-add. */
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ VEC_LOAD( src, srcv, 16, vec_u8_t, src );
+ weight_hv = vec_u8_to_s16_h( srcv );
+ weight_lv = vec_u8_to_s16_l( srcv );
+
+ weight_hv = vec_mladd( weight_hv, scalev, offsetv );
+ weight_lv = vec_mladd( weight_lv, scalev, offsetv );
+
+ srcv = vec_packsu( weight_hv, weight_lv );
+ vec_st( srcv, 0, dst );
+ }
+ }
+}
+/* Weighted prediction, 20-pixel-wide rows; same arithmetic as the narrower
+ * kernels:
+ *   dst = clip255( ((src * i_scale + (1 << (i_denom-1))) >> i_denom) + i_offset )
+ * with the no-shift variant when i_denom == 0.  Processes 16 pixels as
+ * high/low s16 halves plus a third half-vector for pixels 16..19, then
+ * stores 16 aligned bytes followed by one 4-byte element store. */
+static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+ const x264_weight_t *weight, int i_height )
+{
+ LOAD_ZERO;
+ PREP_LOAD_SRC( src );
+ vec_u8_t src_1v, src_2v, src_3v;
+ vec_s16_t weight_lv, weight_hv, weight_3v;
+ vec_s16_t scalev, offsetv, denomv, roundv;
+ vec_s16_u loadv;
+
+ int denom = weight->i_denom;
+
+ loadv.s[0] = weight->i_scale;
+ scalev = vec_splat( loadv.v, 0 );
+
+ loadv.s[0] = weight->i_offset;
+ offsetv = vec_splat( loadv.v, 0 );
+
+ if( denom >= 1 )
+ {
+ loadv.s[0] = denom;
+ denomv = vec_splat( loadv.v, 0 );
+
+ /* Rounding term for the >> denom shift below. */
+ loadv.s[0] = 1<<(denom - 1);
+ roundv = vec_splat( loadv.v, 0 );
+
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ /* Unaligned 20-byte read: vec_ld(19, src) fetches the vector
+  * containing byte 19, and _src_ (from PREP_LOAD_SRC) realigns
+  * both spans with vec_perm. */
+ src_1v = vec_ld( 0, src );
+ src_2v = vec_ld( 16, src );
+ src_3v = vec_ld( 19, src );
+ src_1v = vec_perm( src_1v, src_2v, _src_ );
+ src_3v = vec_perm( src_2v, src_3v, _src_ );
+ weight_hv = vec_u8_to_s16_h( src_1v );
+ weight_lv = vec_u8_to_s16_l( src_1v );
+ weight_3v = vec_u8_to_s16_h( src_3v );
+
+ weight_hv = vec_mladd( weight_hv, scalev, roundv );
+ weight_lv = vec_mladd( weight_lv, scalev, roundv );
+ weight_3v = vec_mladd( weight_3v, scalev, roundv );
+ weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
+ weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
+ weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
+ weight_hv = vec_add( weight_hv, offsetv );
+ weight_lv = vec_add( weight_lv, offsetv );
+ weight_3v = vec_add( weight_3v, offsetv );
+
+ /* 16 bytes aligned, then 4 more via a u32 element store at +16. */
+ src_1v = vec_packsu( weight_hv, weight_lv );
+ src_3v = vec_packsu( weight_3v, zero_s16v );
+ vec_st( src_1v, 0, dst );
+ vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
+ }
+ }
+ else
+ {
+ /* denom == 0: no shift, fold the offset into the multiply-add. */
+ for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+ {
+ src_1v = vec_ld( 0, src );
+ src_2v = vec_ld( 16, src );
+ src_3v = vec_ld( 19, src );
+ src_1v = vec_perm( src_1v, src_2v, _src_ );
+ src_3v = vec_perm( src_2v, src_3v, _src_ );
+ weight_hv = vec_u8_to_s16_h( src_1v );
+ weight_lv = vec_u8_to_s16_l( src_1v );
+ weight_3v = vec_u8_to_s16_h( src_3v );
+
+ weight_hv = vec_mladd( weight_hv, scalev, offsetv );
+ weight_lv = vec_mladd( weight_lv, scalev, offsetv );
+ weight_3v = vec_mladd( weight_3v, scalev, offsetv );
+
+ src_1v = vec_packsu( weight_hv, weight_lv );
+ src_3v = vec_packsu( weight_3v, zero_s16v );
+ vec_st( src_1v, 0, dst );
+ vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
+ }
+ }
+}
+
+/* Width-class dispatch table for the AltiVec weight kernels above.
+ * Entries 3 and 4 both use the 16-wide kernel (16 bytes cover both
+ * classes).  NOTE(review): index semantics presumably mirror x264's
+ * scalar x264_mc_weight_wtab -- confirm ordering against common/mc.c. */
+static weight_fn_t x264_mc_weight_wtab_altivec[6] =
+{
+ mc_weight_w2_altivec,
+ mc_weight_w4_altivec,
+ mc_weight_w8_altivec,
+ mc_weight_w16_altivec,
+ mc_weight_w16_altivec,
+ mc_weight_w20_altivec,
+};
+
+#endif // !HIGH_BIT_DEPTH
+