Bump dates to 2015

diff --git a/common/ppc/mc.c b/common/ppc/mc.c
index 81d5c5f35bdaf1f907ed0eef5e32e0548ab26050..0dfedda1b9f8c7ff148f1747011aa478aa2d1336 100644
--- a/common/ppc/mc.c
+++ b/common/ppc/mc.c
@@ -1,7 +1,7 @@
 /*****************************************************************************
- * mc.c: h264 encoder library (Motion Compensation)
+ * mc.c: ppc motion compensation
  *****************************************************************************
- * Copyright (C) 2003-2008 x264 project
+ * Copyright (C) 2003-2015 x264 project
  *
  * Authors: Eric Petit <eric.petit@lapsus.org>
  *          Guillaume Poirier <gpoirier@mplayerhq.hu>
@@ -19,6 +19,9 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
 #include <stdlib.h>
 #include "mc.h"
 #include "ppccommon.h"
 
-typedef void (*pf_mc_t)( uint8_t *src, int i_src,
-                         uint8_t *dst, int i_dst, int i_height );
-
-
-static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
-static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
-
+#if !HIGH_BIT_DEPTH
+typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
+                         uint8_t *dst, intptr_t i_dst, int i_height );
 
 static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
 {
@@ -47,42 +46,38 @@ static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
            pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
            pix[ 3*i_pix_next];
 }
+
 static inline int x264_tapfilter1( uint8_t *pix )
 {
     return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
            pix[ 3];
 }
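
Both tap-filter helpers return the raw, unnormalized output of the H.264 six-tap
(1,-5,20,20,-5,1) filter; callers turn that sum into a half-pel sample by rounding
and clipping. A minimal sketch of that step, assuming the standard (sum + 16) >> 5
normalization (clip_uint8_sketch and hpel_sample_sketch are hypothetical names):

/* Hypothetical clip helper; x264 has its own equivalent. */
static inline uint8_t clip_uint8_sketch( int x )
{
    return x < 0 ? 0 : x > 255 ? 255 : (uint8_t)x;
}

/* Sketch: one horizontal half-pel sample from the unnormalized 6-tap sum. */
static inline uint8_t hpel_sample_sketch( uint8_t *pix )
{
    return clip_uint8_sketch( ( x264_tapfilter1( pix ) + 16 ) >> 5 );
}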
 
-
-static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  int i_dst,
-                                               uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  intptr_t i_dst,
+                                               uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
 {
-    int x, y;
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
-        for( x = 0; x < 4; x++ )
-        {
+        for( int x = 0; x < 4; x++ )
             dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
-        }
         dst  += i_dst;
         src1 += i_src1;
         src2 += i_src1;
     }
 }
 
-static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
-                                               uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  intptr_t i_dst,
+                                               uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
 {
-    int y;
     vec_u8_t src1v, src2v;
     PREP_LOAD;
     PREP_STORE8;
     PREP_LOAD_SRC( src1 );
     PREP_LOAD_SRC( src2 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
         VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
         VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
@@ -95,17 +90,16 @@ static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
     }
 }
 
-static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
-                                                uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  intptr_t i_dst,
+                                                uint8_t *src1, intptr_t i_src1,
                                                 uint8_t *src2, int i_height )
 {
-    int y;
     vec_u8_t src1v, src2v;
     PREP_LOAD;
     PREP_LOAD_SRC( src1 );
     PREP_LOAD_SRC( src2 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
         VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
         VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
@@ -118,8 +112,8 @@ static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
     }
 }
 
-static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
-                                                uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  intptr_t i_dst,
+                                                uint8_t *src1, intptr_t i_src1,
                                                 uint8_t *src2, int i_height )
 {
     x264_pixel_avg2_w16_altivec(dst, i_dst, src1, i_src1, src2, i_height);
@@ -129,8 +123,8 @@ static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
 /* mc_copy: plain c */
 
 #define MC_COPY( name, a )                                \
-static void name( uint8_t *dst, int i_dst,                \
-                  uint8_t *src, int i_src, int i_height ) \
+static void name( uint8_t *dst, intptr_t i_dst,           \
+                  uint8_t *src, intptr_t i_src, int i_height ) \
 {                                                         \
     int y;                                                \
     for( y = 0; y < i_height; y++ )                       \
@@ -143,15 +137,14 @@ static void name( uint8_t *dst, int i_dst,                \
 MC_COPY( x264_mc_copy_w4_altivec,  4  )
 MC_COPY( x264_mc_copy_w8_altivec,  8  )
 
-static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
-                                      uint8_t *src, int i_src, int i_height )
+static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
+                                      uint8_t *src, intptr_t i_src, int i_height )
 {
-    int y;
     vec_u8_t cpyV;
     PREP_LOAD;
     PREP_LOAD_SRC( src );
 
-    for( y = 0; y < i_height; y++)
+    for( int y = 0; y < i_height; y++ )
     {
         VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
         vec_st(cpyV, 0, dst);
@@ -162,76 +155,103 @@ static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
 }
 
 
-static void mc_luma_altivec( uint8_t *dst,    int i_dst_stride,
-                             uint8_t *src[4], int i_src_stride,
+static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
+                                              uint8_t *src, intptr_t i_src, int i_height )
+{
+    for( int y = 0; y < i_height; ++y )
+    {
+        vec_u8_t cpyV = vec_ld( 0, src );
+        vec_st(cpyV, 0, dst);
+
+        src += i_src;
+        dst += i_dst;
+    }
+}
+
+
+static void mc_luma_altivec( uint8_t *dst,    intptr_t i_dst_stride,
+                             uint8_t *src[4], intptr_t i_src_stride,
                              int mvx, int mvy,
-                             int i_width, int i_height )
+                             int i_width, int i_height, const x264_weight_t *weight )
 {
     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
-    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
-    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
+    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
+    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
     if( qpel_idx & 5 ) /* qpel interpolation needed */
     {
-        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
-
-        switch(i_width) {
-        case 4:
-            x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 8:
-            x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 16:
-        default:
-            x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
-        }
+        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
 
+        switch( i_width )
+        {
+            case 4:
+                x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 8:
+                x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 16:
+            default:
+                x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
+        }
+        if( weight->weightfn )
+            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
     }
+    else if( weight->weightfn )
+        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
     else
     {
-        switch(i_width) {
-        case 4:
-            x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
-            break;
-        case 8:
-            x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
-            break;
-        case 16:
-            x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
-            break;
+        switch( i_width )
+        {
+            case 4:
+                x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
+                break;
+            case 8:
+                x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
+                break;
+            case 16:
+                x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
+                break;
         }
     }
 }
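
The dispatch above resolves a quarter-pel MV against four precomputed planes
(full-pel plus three half-pel planes): x264_hpel_ref0/x264_hpel_ref1 pick the two
nearest planes for each quarter-pel index, and qpel_idx & 5 tests whether either MV
component has an odd quarter-pel phase, in which case the sample is the rounded mean
of the two planes. A scalar sketch of the averaging step that the
x264_pixel_avg2_w*_altivec kernels vectorize (width generalizes the w4 loop above):

/* Sketch: dst = rounded mean of the two nearest half-pel planes. */
static void pixel_avg2_sketch( uint8_t *dst, intptr_t i_dst,
                               uint8_t *src1, intptr_t i_src,
                               uint8_t *src2, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        for( int x = 0; x < width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst;
        src1 += i_src;
        src2 += i_src; /* both sources come from planes with the same stride */
    }
}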
 
 
 
-static uint8_t *get_ref_altivec( uint8_t *dst,   int *i_dst_stride,
-                                 uint8_t *src[4], int i_src_stride,
+static uint8_t *get_ref_altivec( uint8_t *dst,   intptr_t *i_dst_stride,
+                                 uint8_t *src[4], intptr_t i_src_stride,
                                  int mvx, int mvy,
-                                 int i_width, int i_height )
+                                 int i_width, int i_height, const x264_weight_t *weight )
 {
     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
-    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
-    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
+    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
+    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
     if( qpel_idx & 5 ) /* qpel interpolation needed */
     {
-        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
-        switch(i_width) {
-        case 4:
-            x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 8:
-            x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 12:
-        case 16:
-        default:
-            x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 20:
-            x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
+        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
+        switch( i_width )
+        {
+            case 4:
+                x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 8:
+                x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 12:
+            case 16:
+            default:
+                x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 20:
+                x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
         }
+        if( weight->weightfn )
+            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
+        return dst;
+    }
+    else if( weight->weightfn )
+    {
+        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
         return dst;
     }
     else
@@ -241,42 +261,63 @@ static uint8_t *get_ref_altivec( uint8_t *dst,   int *i_dst_stride,
     }
 }
 
-#define DO_PROCESS(a) \
-        src##a##v_16 = vec_u8_to_u16( src##a##v_8 ); \
-        src##a##v_16 = vec_mladd( coeff##a##v, src##a##v_16, zero_u16v ); \
-        dstv_16      = vec_add( dstv_16, src##a##v_16 )
+static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                           uint8_t *src, intptr_t i_src_stride,
+                           int mvx, int mvy, int i_height )
+{
+    uint8_t *srcp;
+    int d8x = mvx&0x07;
+    int d8y = mvy&0x07;
+
+    int cA = (8-d8x)*(8-d8y);
+    int cB = d8x    *(8-d8y);
+    int cC = (8-d8x)*d8y;
+    int cD = d8x    *d8y;
+
+    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
+    srcp = &src[i_src_stride];
 
-static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
-                                   uint8_t *src, int i_src_stride,
-                                   int mvx, int mvy,
-                                   int i_height )
+    for( int y = 0; y < i_height; y++ )
+    {
+        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
+        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
+        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
+        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;
+
+        src  += i_src_stride;
+        srcp += i_src_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
+    }
+}
+
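
This scalar 2xh path spells out the chroma math the vector kernels implement: the
four bilinear coefficients for any eighth-pel offset sum to 64, so adding 32 before
the >> 6 gives exact rounding, and because the source is interleaved UV (U at even
bytes, V at odd bytes) the horizontal neighbor of src[x] is src[x+2]. A
self-contained check of the coefficient identity:

#include <assert.h>

/* The bilinear weights for every eighth-pel (d8x,d8y) sum to 64, which is
 * what makes the "+32, >>6" in mc_chroma_2xh an exact rounding. */
int main( void )
{
    for( int d8x = 0; d8x < 8; d8x++ )
        for( int d8y = 0; d8y < 8; d8y++ )
            assert( (8-d8x)*(8-d8y) + d8x*(8-d8y) + (8-d8x)*d8y + d8x*d8y == 64 );
    return 0;
}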
+static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                                   uint8_t *src, intptr_t i_src_stride,
+                                   int mvx, int mvy, int i_height )
 {
     uint8_t *srcp;
-    int y;
     int d8x = mvx & 0x07;
     int d8y = mvy & 0x07;
 
-    DECLARE_ALIGNED_16( uint16_t coeff[4] );
+    ALIGNED_16( uint16_t coeff[4] );
     coeff[0] = (8-d8x)*(8-d8y);
     coeff[1] = d8x    *(8-d8y);
     coeff[2] = (8-d8x)*d8y;
     coeff[3] = d8x    *d8y;
 
-    src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
-    srcp  = &src[i_src_stride];
+    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
+    srcp = &src[i_src_stride];
 
     LOAD_ZERO;
     PREP_LOAD;
     PREP_LOAD_SRC( src );
-    PREP_STORE4;
     vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
-    vec_u8_t    src0v_8, src1v_8, src2v_8, src3v_8;
-    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16;
-    vec_u8_t    dstv_8;
-    vec_u16_t   dstv_16;
-    vec_u8_t    permv;
-    vec_u16_t   shiftv;
-    vec_u16_t   k32v;
+    vec_u8_t    src2v_8, dstuv, dstvv;
+    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
+    vec_u16_t   shiftv, k32v;
+
+    static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
+    static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);
 
     coeff0v = vec_ld( 0, coeff );
     coeff3v = vec_splat( coeff0v, 3 );
@@ -284,53 +325,76 @@ static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
     coeff1v = vec_splat( coeff0v, 1 );
     coeff0v = vec_splat( coeff0v, 0 );
     k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
-    permv   = vec_lvsl( 0, (uint8_t *) 1 );
     shiftv  = vec_splat_u16( 6 );
 
-    VEC_LOAD( src, src2v_8, 5, vec_u8_t, src );
-    src3v_8 = vec_perm( src2v_8, src2v_8, permv );
+    VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
+    src2v_16 = vec_u8_to_u16( src2v_8 );
+    src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y += 2 )
     {
-        src0v_8 = src2v_8;
-        src1v_8 = src3v_8;
-        VEC_LOAD_G( srcp, src2v_8, 5, vec_u8_t );
-        src3v_8 = vec_perm( src2v_8, src2v_8, permv );
+        src0v_16 = src2v_16;
+        src1v_16 = src3v_16;
+        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
+        src2v_16 = vec_u8_to_u16( src2v_8 );
+        src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );
 
-        dstv_16 = k32v;
+        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
+        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
+        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
+        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
 
-        DO_PROCESS( 0 );
-        DO_PROCESS( 1 );
-        DO_PROCESS( 2 );
-        DO_PROCESS( 3 );
+        dstv16 = vec_sr( dstv16, shiftv );
 
-        dstv_16 = vec_sr( dstv_16, shiftv );
-        dstv_8  = vec_u16_to_u8( dstv_16 );
-        VEC_STORE4( dstv_8, dst );
+        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
+        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
+        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
 
-        dst  += i_dst_stride;
         srcp += i_src_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
+
+        src0v_16 = src2v_16;
+        src1v_16 = src3v_16;
+        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
+        src2v_16 = vec_u8_to_u16( src2v_8 );
+        src3v_16 = vec_u8_to_u16( vec_sld( src2v_8, src2v_8, 2 ) );
+
+        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
+        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
+        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
+        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
+
+        dstv16 = vec_sr( dstv16, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
+        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
+        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
+
+        srcp += i_src_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
     }
 }
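
Each dstv16 above holds eight 16-bit results in source order U0,V0,U1,V1,U2,V2,U3,V3;
with big-endian lanes the low byte of lane n sits at byte 2n+1, so perm0v
(bytes 1,5,9,13) gathers the four U samples and perm1v (bytes 3,7,11,15) the four V
samples, and each vec_ste stores a 4-byte row. A scalar sketch of that deinterleaving
step (hypothetical names):

/* Sketch: split eight interleaved 16-bit results into 4 U and 4 V bytes. */
static void deinterleave_uv_sketch( uint8_t *dstu, uint8_t *dstv,
                                    const uint16_t results[8] )
{
    for( int i = 0; i < 4; i++ )
    {
        dstu[i] = (uint8_t)results[2*i];   /* even lanes carry U */
        dstv[i] = (uint8_t)results[2*i+1]; /* odd lanes carry V  */
    }
}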
 
-static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
-                                   uint8_t *src, int i_src_stride,
-                                   int mvx, int mvy,
-                                   int i_height )
+static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                                   uint8_t *src, intptr_t i_src_stride,
+                                   int mvx, int mvy, int i_height )
 {
     uint8_t *srcp;
-    int y;
     int d8x = mvx & 0x07;
     int d8y = mvy & 0x07;
 
-    DECLARE_ALIGNED_16( uint16_t coeff[4] );
+    ALIGNED_16( uint16_t coeff[4] );
     coeff[0] = (8-d8x)*(8-d8y);
     coeff[1] = d8x    *(8-d8y);
     coeff[2] = (8-d8x)*d8y;
     coeff[3] = d8x    *d8y;
 
-    src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
-    srcp  = &src[i_src_stride];
+    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
+    srcp = &src[i_src_stride];
 
     LOAD_ZERO;
     PREP_LOAD;
@@ -338,12 +402,10 @@ static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
     PREP_STORE8;
     vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
     vec_u8_t    src0v_8, src1v_8, src2v_8, src3v_8;
-    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16;
-    vec_u8_t    dstv_8;
-    vec_u16_t   dstv_16;
-    vec_u8_t    permv;
-    vec_u16_t   shiftv;
-    vec_u16_t   k32v;
+    vec_u8_t    dstuv, dstvv;
+    vec_u16_t   src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
+    vec_u16_t   src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
+    vec_u16_t   shiftv, k32v;
 
     coeff0v = vec_ld( 0, coeff );
     coeff3v = vec_splat( coeff0v, 3 );
@@ -351,50 +413,108 @@ static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
     coeff1v = vec_splat( coeff0v, 1 );
     coeff0v = vec_splat( coeff0v, 0 );
     k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
-    permv   = vec_lvsl( 0, (uint8_t *) 1 );
     shiftv  = vec_splat_u16( 6 );
 
-    VEC_LOAD( src, src2v_8, 9, vec_u8_t, src);
-    src3v_8 = vec_perm( src2v_8, src2v_8, permv );
+    static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
+    static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);
+
+    VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
+    VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
+    src3v_8 = vec_sld( src2v_8, src3v_8, 2 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y += 2 )
     {
         src0v_8 = src2v_8;
         src1v_8 = src3v_8;
-        VEC_LOAD_G( srcp, src2v_8, 9, vec_u8_t );
-        src3v_8 = vec_perm( src2v_8, src2v_8, permv );
+        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
+        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
+
+        src3v_8 = vec_sld( src2v_8, src3v_8, 2 );
+
+        src0v_16h = vec_u8_to_u16_h( src0v_8 );
+        src0v_16l = vec_u8_to_u16_l( src0v_8 );
+        src1v_16h = vec_u8_to_u16_h( src1v_8 );
+        src1v_16l = vec_u8_to_u16_l( src1v_8 );
+        src2v_16h = vec_u8_to_u16_h( src2v_8 );
+        src2v_16l = vec_u8_to_u16_l( src2v_8 );
+        src3v_16h = vec_u8_to_u16_h( src3v_8 );
+        src3v_16l = vec_u8_to_u16_l( src3v_8 );
+
+        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
+        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
+        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
+
+        dstv_16h = vec_sr( dstv_16h, shiftv );
+        dstv_16l = vec_sr( dstv_16l, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
+
+        VEC_STORE8( dstuv, dstu );
+        VEC_STORE8( dstvv, dstv );
 
-        dstv_16 = k32v;
-
-        DO_PROCESS( 0 );
-        DO_PROCESS( 1 );
-        DO_PROCESS( 2 );
-        DO_PROCESS( 3 );
+        srcp += i_src_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
 
-        dstv_16 = vec_sr( dstv_16, shiftv );
-        dstv_8  = vec_u16_to_u8( dstv_16 );
-        VEC_STORE8( dstv_8, dst );
+        src0v_8 = src2v_8;
+        src1v_8 = src3v_8;
+        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
+        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
+
+        src3v_8 = vec_sld( src2v_8, src3v_8, 2 );
+
+        src0v_16h = vec_u8_to_u16_h( src0v_8 );
+        src0v_16l = vec_u8_to_u16_l( src0v_8 );
+        src1v_16h = vec_u8_to_u16_h( src1v_8 );
+        src1v_16l = vec_u8_to_u16_l( src1v_8 );
+        src2v_16h = vec_u8_to_u16_h( src2v_8 );
+        src2v_16l = vec_u8_to_u16_l( src2v_8 );
+        src3v_16h = vec_u8_to_u16_h( src3v_8 );
+        src3v_16l = vec_u8_to_u16_l( src3v_8 );
+
+        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
+        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
+        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
+
+        dstv_16h = vec_sr( dstv_16h, shiftv );
+        dstv_16l = vec_sr( dstv_16l, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
+
+        VEC_STORE8( dstuv, dstu );
+        VEC_STORE8( dstvv, dstv );
 
-        dst  += i_dst_stride;
         srcp += i_src_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
     }
 }
 
-static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
-                               uint8_t *src, int i_src_stride,
-                               int mvx, int mvy,
-                               int i_width, int i_height )
+static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                               uint8_t *src, intptr_t i_src_stride,
+                               int mvx, int mvy, int i_width, int i_height )
 {
     if( i_width == 8 )
-    {
-        mc_chroma_altivec_8xh( dst, i_dst_stride, src, i_src_stride,
+        mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                                mvx, mvy, i_height );
-    }
-    else
-    {
-        mc_chroma_altivec_4xh( dst, i_dst_stride, src, i_src_stride,
+    else if( i_width == 4 )
+        mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                                mvx, mvy, i_height );
-    }
+    else
+        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
+                       mvx, mvy, i_height );
 }
 
 #define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
@@ -541,10 +661,8 @@ static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
 }
 
 void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
-                               int i_stride, int i_width, int i_height, int16_t *buf )
+                               intptr_t i_stride, int i_width, int i_height, int16_t *buf )
 {
-    int x, y;
-
     vec_u8_t destv;
     vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
     vec_s16_t dest1v, dest2v;
@@ -574,9 +692,9 @@ void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint
     temp_u.s[0]=32;
     thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
-        x = 0;
+        int x = 0;
 
         /* horizontal_filter */
         HPEL_FILTER_HORIZONTAL();
@@ -624,8 +742,7 @@ void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint
         temp5v = vec_u8_to_s16_h( src5v );
         temp6v = vec_u8_to_s16_h( src6v );
 
-        HPEL_FILTER_1( temp1v, temp2v, temp3v,
-                       temp4v, temp5v, temp6v );
+        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );
 
         /* central_filter */
         tempav = tempcv;
@@ -638,11 +755,425 @@ void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint
     }
 }
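
The filter produces all three half-pel planes in one pass: dsth from the horizontal
six-tap filter, dstv from the vertical one, and dstc by filtering the unrounded
16-bit vertical intermediates (kept in buf) a second time, horizontally. A scalar
sketch of the center sample, assuming the (sum + 512) >> 10 normalization of x264's
C reference (hpel_center_sketch is a hypothetical name):

/* Sketch: one "center" half-pel sample from 16-bit vertical intermediates,
 * normalizing both filter stages at once. */
static inline uint8_t hpel_center_sketch( const int16_t *v, int x )
{
    int sum = v[x-2] - 5*v[x-1] + 20*(v[x] + v[x+1]) - 5*v[x+2] + v[x+3];
    sum = ( sum + 512 ) >> 10;
    return sum < 0 ? 0 : sum > 255 ? 255 : (uint8_t)sum;
}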
 
+static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
+                                            intptr_t src_stride, intptr_t dst_stride, int width, int height )
+{
+    int w = width >> 4;
+    int end = (width & 15);
+    vec_u8_t src0v, src1v, src2v;
+    vec_u8_t lv, hv, src1p1v;
+    vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
+    static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
+
+    for( int y = 0; y < height; y++ )
+    {
+        int x;
+        uint8_t *src1 = src0+src_stride;
+        uint8_t *src2 = src1+src_stride;
+
+        src0v = vec_ld(0, src0);
+        src1v = vec_ld(0, src1);
+        src2v = vec_ld(0, src2);
+
+        avg0v = vec_avg(src0v, src1v);
+        avg1v = vec_avg(src1v, src2v);
+
+        for( x = 0; x < w; x++ )
+        {
+            lv = vec_ld(16*(x*2+1), src0);
+            src1v = vec_ld(16*(x*2+1), src1);
+            avghv = vec_avg(lv, src1v);
+
+            lv = vec_ld(16*(x*2+2), src0);
+            src1p1v = vec_ld(16*(x*2+2), src1);
+            avghp1v = vec_avg(lv, src1p1v);
+
+            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
+            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
+
+            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
+            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
+
+            avg0v = avghp1v;
+
+            hv = vec_ld(16*(x*2+1), src2);
+            avghv = vec_avg(src1v, hv);
+
+            hv = vec_ld(16*(x*2+2), src2);
+            avghp1v = vec_avg(src1p1v, hv);
+
+            avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
+            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
+
+            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
+            vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
+
+            avg1v = avghp1v;
+
+        }
+        if( end )
+        {
+            lv = vec_ld(16*(x*2+1), src0);
+            src1v = vec_ld(16*(x*2+1), src1);
+            avghv = vec_avg(lv, src1v);
+
+            lv = vec_ld(16*(x*2+1), src2);
+            avghp1v = vec_avg(src1v, lv);
+
+            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
+            avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);
+
+            lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
+            hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
+
+            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
+            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
+            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dsth);
+            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dsth);
+
+            lv = vec_sld(lv, lv, 8);
+            hv = vec_sld(hv, hv, 8);
+
+            vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dstv);
+            vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dstv);
+            vec_ste((vec_u32_t)hv,16*x,(uint32_t*)dstc);
+            vec_ste((vec_u32_t)hv,16*x+4,(uint32_t*)dstc);
+        }
+
+        src0 += src_stride*2;
+        dst0 += dst_stride;
+        dsth += dst_stride;
+        dstv += dst_stride;
+        dstc += dst_stride;
+    }
+}
+
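
frame_init_lowres_core builds four half-resolution planes in one pass: dst0 at
full-pel and dsth/dstv/dstc offset by half a pixel horizontally, vertically and
diagonally, each output pixel coming from two rounds of vec_avg (rounded byte
averages). A scalar sketch of one row, matching the two averaging stages in the
loop above (hypothetical names):

/* Sketch: every lowres pixel is a rounded average of two rounded
 * vertical averages, as in the vec_avg pairs above. */
#define LOWRES_FILTER(a,b,c,d) ( ( (((a)+(b)+1)>>1) + (((c)+(d)+1)>>1) + 1 ) >> 1 )

static void lowres_row_sketch( const uint8_t *src0, const uint8_t *src1,
                               const uint8_t *src2, uint8_t *dst0, uint8_t *dsth,
                               uint8_t *dstv, uint8_t *dstc, int width )
{
    for( int x = 0; x < width; x++ )
    {
        dst0[x] = LOWRES_FILTER( src0[2*x  ], src1[2*x  ], src0[2*x+1], src1[2*x+1] );
        dsth[x] = LOWRES_FILTER( src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2] );
        dstv[x] = LOWRES_FILTER( src1[2*x  ], src2[2*x  ], src1[2*x+1], src2[2*x+1] );
        dstc[x] = LOWRES_FILTER( src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2] );
    }
}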
+static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                  const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    vec_u8_t srcv;
+    vec_s16_t weightv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, roundv );
+            weightv = vec_sra( weightv, (vec_u16_t)denomv );
+            weightv = vec_add( weightv, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
+        }
+    }
+}
+static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                  const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    vec_u8_t srcv;
+    vec_s16_t weightv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, roundv );
+            weightv = vec_sra( weightv, (vec_u16_t)denomv );
+            weightv = vec_add( weightv, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
+        }
+    }
+}
+static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                  const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    PREP_STORE8;
+    vec_u8_t srcv;
+    vec_s16_t weightv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, roundv );
+            weightv = vec_sra( weightv, (vec_u16_t)denomv );
+            weightv = vec_add( weightv, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            VEC_STORE8( srcv, dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            VEC_STORE8( srcv, dst );
+        }
+    }
+}
+static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                   const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    vec_u8_t srcv;
+    vec_s16_t weight_lv, weight_hv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
+            weight_hv = vec_u8_to_s16_h( srcv );
+            weight_lv = vec_u8_to_s16_l( srcv );
+
+            weight_hv = vec_mladd( weight_hv, scalev, roundv );
+            weight_lv = vec_mladd( weight_lv, scalev, roundv );
+            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
+            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
+            weight_hv = vec_add( weight_hv, offsetv );
+            weight_lv = vec_add( weight_lv, offsetv );
+
+            srcv = vec_packsu( weight_hv, weight_lv );
+            vec_st( srcv, 0, dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
+            weight_hv = vec_u8_to_s16_h( srcv );
+            weight_lv = vec_u8_to_s16_l( srcv );
+
+            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
+            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
+
+            srcv = vec_packsu( weight_hv, weight_lv );
+            vec_st( srcv, 0, dst );
+        }
+    }
+}
+static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                   const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD_SRC( src );
+    vec_u8_t src_1v, src_2v, src_3v;
+    vec_s16_t weight_lv, weight_hv, weight_3v;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            src_1v = vec_ld( 0,  src );
+            src_2v = vec_ld( 16, src );
+            src_3v = vec_ld( 19, src );
+            src_1v = vec_perm( src_1v, src_2v, _src_ );
+            src_3v = vec_perm( src_2v, src_3v, _src_ );
+            weight_hv = vec_u8_to_s16_h( src_1v );
+            weight_lv = vec_u8_to_s16_l( src_1v );
+            weight_3v = vec_u8_to_s16_h( src_3v );
+
+            weight_hv = vec_mladd( weight_hv, scalev, roundv );
+            weight_lv = vec_mladd( weight_lv, scalev, roundv );
+            weight_3v = vec_mladd( weight_3v, scalev, roundv );
+            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
+            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
+            weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
+            weight_hv = vec_add( weight_hv, offsetv );
+            weight_lv = vec_add( weight_lv, offsetv );
+            weight_3v = vec_add( weight_3v, offsetv );
+
+            src_1v = vec_packsu( weight_hv, weight_lv );
+            src_3v = vec_packsu( weight_3v, zero_s16v );
+            vec_st( src_1v, 0, dst );
+            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            src_1v = vec_ld( 0,  src );
+            src_2v = vec_ld( 16, src );
+            src_3v = vec_ld( 19, src );
+            src_1v = vec_perm( src_1v, src_2v, _src_ );
+            src_3v = vec_perm( src_2v, src_3v, _src_ );
+            weight_hv = vec_u8_to_s16_h( src_1v );
+            weight_lv = vec_u8_to_s16_l( src_1v );
+            weight_3v = vec_u8_to_s16_h( src_3v );
+
+            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
+            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
+            weight_3v = vec_mladd( weight_3v, scalev, offsetv );
+
+            src_1v = vec_packsu( weight_hv, weight_lv );
+            src_3v = vec_packsu( weight_3v, zero_s16v );
+            vec_st( src_1v, 0, dst );
+            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
+        }
+    }
+}
+
+static weight_fn_t x264_mc_weight_wtab_altivec[6] =
+{
+    mc_weight_w2_altivec,
+    mc_weight_w4_altivec,
+    mc_weight_w8_altivec,
+    mc_weight_w16_altivec,
+    mc_weight_w16_altivec,
+    mc_weight_w20_altivec,
+};
+
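
The table is indexed with weight->weightfn[i_width>>2] at the call sites in
mc_luma_altivec and get_ref_altivec, so block widths 2, 4, 8, 12, 16 and 20 map to
slots 0-5, with width 12 falling through to the w16 kernel. Every kernel applies the
same per-pixel weighting visible in the vector setup above; a scalar sketch
(hypothetical names, clipping done by vec_packsu in the real kernels):

/* Sketch of the weighting all mc_weight_w*_altivec kernels perform. */
static void mc_weight_sketch( uint8_t *dst, intptr_t i_dst,
                              uint8_t *src, intptr_t i_src,
                              int scale, int offset, int denom,
                              int width, int height )
{
    for( int y = 0; y < height; y++, dst += i_dst, src += i_src )
        for( int x = 0; x < width; x++ )
        {
            int v = denom >= 1 ? ((src[x]*scale + (1 << (denom-1))) >> denom) + offset
                               : src[x]*scale + offset;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
        }
}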
+#endif // !HIGH_BIT_DEPTH
+
 void x264_mc_altivec_init( x264_mc_functions_t *pf )
 {
+#if !HIGH_BIT_DEPTH
     pf->mc_luma   = mc_luma_altivec;
     pf->get_ref   = get_ref_altivec;
     pf->mc_chroma = mc_chroma_altivec;
 
+    pf->copy_16x16_unaligned = x264_mc_copy_w16_altivec;
+    pf->copy[PIXEL_16x16] = x264_mc_copy_w16_aligned_altivec;
+
     pf->hpel_filter = x264_hpel_filter_altivec;
+    pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
+
+    pf->weight = x264_mc_weight_wtab_altivec;
+#endif // !HIGH_BIT_DEPTH
 }