Bump dates to 2016
diff --git a/common/ppc/mc.c b/common/ppc/mc.c
index c703d085bc66be163531cf4c7624174a38bc6a27..8df289457cab6579039b65a8fe5666414c6c3c5e 100644
--- a/common/ppc/mc.c
+++ b/common/ppc/mc.c
@@ -1,7 +1,7 @@
 /*****************************************************************************
- * mc.c: h264 encoder library (Motion Compensation)
+ * mc.c: ppc motion compensation
  *****************************************************************************
- * Copyright (C) 2003-2008 x264 project
+ * Copyright (C) 2003-2016 x264 project
  *
  * Authors: Eric Petit <eric.petit@lapsus.org>
  *          Guillaume Poirier <gpoirier@mplayerhq.hu>
@@ -19,6 +19,9 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
 #include <stdlib.h>
 #include "mc.h"
 #include "ppccommon.h"
 
-typedef void (*pf_mc_t)( uint8_t *src, int i_src,
-                         uint8_t *dst, int i_dst, int i_height );
-
-
-static const int hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
-static const int hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};
-
+#if !HIGH_BIT_DEPTH
+typedef void (*pf_mc_t)( uint8_t *src, intptr_t i_src,
+                         uint8_t *dst, intptr_t i_dst, int i_height );
 
 static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
 {
@@ -47,42 +46,38 @@ static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
            pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] +
            pix[ 3*i_pix_next];
 }
+
 static inline int x264_tapfilter1( uint8_t *pix )
 {
     return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] +
            pix[ 3];
 }
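
Both tapfilters return the raw 6-tap (1,-5,20,20,-5,1) response; the half-pel callers still have to round and clip it. A minimal sketch of that final step under the usual H.264 convention (a hypothetical helper, not code from this diff):

    static inline uint8_t hpel_round_clip( int tap )
    {
        /* the 6-tap sum is scaled by 32: add 16 to round, shift, clamp to 8 bits */
        int v = ( tap + 16 ) >> 5;
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }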
 
-
-static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  int i_dst,
-                                               uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w4_altivec( uint8_t *dst,  intptr_t i_dst,
+                                               uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
 {
-    int x, y;
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
-        for( x = 0; x < 4; x++ )
-        {
+        for( int x = 0; x < 4; x++ )
             dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
-        }
         dst  += i_dst;
         src1 += i_src1;
         src2 += i_src1;
     }
 }
 
-static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
-                                               uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  intptr_t i_dst,
+                                               uint8_t *src1, intptr_t i_src1,
                                                uint8_t *src2, int i_height )
 {
-    int y;
     vec_u8_t src1v, src2v;
     PREP_LOAD;
     PREP_STORE8;
     PREP_LOAD_SRC( src1 );
     PREP_LOAD_SRC( src2 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
         VEC_LOAD( src1, src1v, 8, vec_u8_t, src1 );
         VEC_LOAD( src2, src2v, 8, vec_u8_t, src2 );
@@ -95,17 +90,16 @@ static inline void x264_pixel_avg2_w8_altivec( uint8_t *dst,  int i_dst,
     }
 }
 
-static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
-                                                uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  intptr_t i_dst,
+                                                uint8_t *src1, intptr_t i_src1,
                                                 uint8_t *src2, int i_height )
 {
-    int y;
     vec_u8_t src1v, src2v;
     PREP_LOAD;
     PREP_LOAD_SRC( src1 );
     PREP_LOAD_SRC( src2 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
         VEC_LOAD( src1, src1v, 16, vec_u8_t, src1 );
         VEC_LOAD( src2, src2v, 16, vec_u8_t, src2 );
@@ -118,8 +112,8 @@ static inline void x264_pixel_avg2_w16_altivec( uint8_t *dst,  int i_dst,
     }
 }
 
-static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
-                                                uint8_t *src1, int i_src1,
+static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  intptr_t i_dst,
+                                                uint8_t *src1, intptr_t i_src1,
                                                 uint8_t *src2, int i_height )
 {
     x264_pixel_avg2_w16_altivec(dst, i_dst, src1, i_src1, src2, i_height);
@@ -129,8 +123,8 @@ static inline void x264_pixel_avg2_w20_altivec( uint8_t *dst,  int i_dst,
 /* mc_copy: plain c */
 
 #define MC_COPY( name, a )                                \
-static void name( uint8_t *dst, int i_dst,                \
-                  uint8_t *src, int i_src, int i_height ) \
+static void name( uint8_t *dst, intptr_t i_dst,           \
+                  uint8_t *src, intptr_t i_src, int i_height ) \
 {                                                         \
     int y;                                                \
     for( y = 0; y < i_height; y++ )                       \
@@ -143,15 +137,14 @@ static void name( uint8_t *dst, int i_dst,                \
 MC_COPY( x264_mc_copy_w4_altivec,  4  )
 MC_COPY( x264_mc_copy_w8_altivec,  8  )
 
-static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
-                                      uint8_t *src, int i_src, int i_height )
+static void x264_mc_copy_w16_altivec( uint8_t *dst, intptr_t i_dst,
+                                      uint8_t *src, intptr_t i_src, int i_height )
 {
-    int y;
     vec_u8_t cpyV;
     PREP_LOAD;
     PREP_LOAD_SRC( src );
 
-    for( y = 0; y < i_height; y++)
+    for( int y = 0; y < i_height; y++ )
     {
         VEC_LOAD( src, cpyV, 16, vec_u8_t, src );
         vec_st(cpyV, 0, dst);
@@ -162,14 +155,12 @@ static void x264_mc_copy_w16_altivec( uint8_t *dst, int i_dst,
 }
 
 
-static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, int i_dst,
-                                              uint8_t *src, int i_src, int i_height )
+static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, intptr_t i_dst,
+                                              uint8_t *src, intptr_t i_src, int i_height )
 {
-    int y;
-
-    for( y = 0; y < i_height; ++y)
+    for( int y = 0; y < i_height; ++y )
     {
-        vec_u8_t cpyV = vec_ld( 0, src);
+        vec_u8_t cpyV = vec_ld( 0, src );
         vec_st(cpyV, 0, dst);
 
         src += i_src;
@@ -178,76 +169,89 @@ static void x264_mc_copy_w16_aligned_altivec( uint8_t *dst, int i_dst,
 }
 
 
-static void mc_luma_altivec( uint8_t *dst,    int i_dst_stride,
-                             uint8_t *src[4], int i_src_stride,
+static void mc_luma_altivec( uint8_t *dst,    intptr_t i_dst_stride,
+                             uint8_t *src[4], intptr_t i_src_stride,
                              int mvx, int mvy,
-                             int i_width, int i_height )
+                             int i_width, int i_height, const x264_weight_t *weight )
 {
     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
-    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
-    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
+    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
+    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
     if( qpel_idx & 5 ) /* qpel interpolation needed */
     {
-        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
-
-        switch(i_width) {
-        case 4:
-            x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 8:
-            x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 16:
-        default:
-            x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
-        }
+        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
 
+        switch( i_width )
+        {
+            case 4:
+                x264_pixel_avg2_w4_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 8:
+                x264_pixel_avg2_w8_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 16:
+            default:
+                x264_pixel_avg2_w16_altivec( dst, i_dst_stride, src1, i_src_stride, src2, i_height );
+        }
+        if( weight->weightfn )
+            weight->weightfn[i_width>>2]( dst, i_dst_stride, dst, i_dst_stride, weight, i_height );
     }
+    else if( weight->weightfn )
+        weight->weightfn[i_width>>2]( dst, i_dst_stride, src1, i_src_stride, weight, i_height );
     else
     {
-        switch(i_width) {
-        case 4:
-            x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
-            break;
-        case 8:
-            x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
-            break;
-        case 16:
-            x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
-            break;
+        switch( i_width )
+        {
+            case 4:
+                x264_mc_copy_w4_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
+                break;
+            case 8:
+                x264_mc_copy_w8_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
+                break;
+            case 16:
+                x264_mc_copy_w16_altivec( dst, i_dst_stride, src1, i_src_stride, i_height );
+                break;
         }
     }
 }
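
The qpel_idx bits come straight from the MV: bits 0-1 hold mvx&3 and bits 2-3 hold mvy&3, so qpel_idx & 5 flags an odd quarter-pel component in either direction, i.e. a position halfway between two half-pel planes that is synthesized by a rounding average. A scalar sketch of the same dispatch (it mirrors the function above, with the weighting step omitted; x264_hpel_ref0/ref1 are the plane-lookup tables from common/mc.c):

    #include <string.h>

    static void mc_luma_sketch( uint8_t *dst, intptr_t i_dst, uint8_t *src[4], intptr_t i_src,
                                int mvx, int mvy, int w, int h )
    {
        int qpel_idx = ((mvy&3)<<2) + (mvx&3);
        intptr_t offset = (mvy>>2)*i_src + (mvx>>2);
        uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src;
        if( qpel_idx & 5 ) /* quarter-pel: rounding average of two half-pel planes */
        {
            uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
            for( int y = 0; y < h; y++, dst += i_dst, src1 += i_src, src2 += i_src )
                for( int x = 0; x < w; x++ )
                    dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        }
        else /* full- or half-pel: plain copy from the selected plane */
            for( int y = 0; y < h; y++, dst += i_dst, src1 += i_src )
                memcpy( dst, src1, w );
    }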
 
 
 
-static uint8_t *get_ref_altivec( uint8_t *dst,   int *i_dst_stride,
-                                 uint8_t *src[4], int i_src_stride,
+static uint8_t *get_ref_altivec( uint8_t *dst,   intptr_t *i_dst_stride,
+                                 uint8_t *src[4], intptr_t i_src_stride,
                                  int mvx, int mvy,
-                                 int i_width, int i_height )
+                                 int i_width, int i_height, const x264_weight_t *weight )
 {
     int qpel_idx = ((mvy&3)<<2) + (mvx&3);
-    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
-    uint8_t *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
+    intptr_t offset = (mvy>>2)*i_src_stride + (mvx>>2);
+    uint8_t *src1 = src[x264_hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;
     if( qpel_idx & 5 ) /* qpel interpolation needed */
     {
-        uint8_t *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
-        switch(i_width) {
-        case 4:
-            x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 8:
-            x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 12:
-        case 16:
-        default:
-            x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
-        case 20:
-            x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
-            break;
+        uint8_t *src2 = src[x264_hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
+        switch( i_width )
+        {
+            case 4:
+                x264_pixel_avg2_w4_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 8:
+                x264_pixel_avg2_w8_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 12:
+            case 16:
+            default:
+                x264_pixel_avg2_w16_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
+            case 20:
+                x264_pixel_avg2_w20_altivec( dst, *i_dst_stride, src1, i_src_stride, src2, i_height );
+                break;
         }
+        if( weight->weightfn )
+            weight->weightfn[i_width>>2]( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_height );
+        return dst;
+    }
+    else if( weight->weightfn )
+    {
+        weight->weightfn[i_width>>2]( dst, *i_dst_stride, src1, i_src_stride, weight, i_height );
         return dst;
     }
     else
@@ -257,49 +261,47 @@ static uint8_t *get_ref_altivec( uint8_t *dst,   int *i_dst_stride,
     }
 }
 
-static void mc_chroma_2xh( uint8_t *dst, int i_dst_stride,
-                           uint8_t *src, int i_src_stride,
-                           int mvx, int mvy,
-                           int i_height )
+static void mc_chroma_2xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                           uint8_t *src, intptr_t i_src_stride,
+                           int mvx, int mvy, int i_height )
 {
     uint8_t *srcp;
-    int y;
     int d8x = mvx&0x07;
     int d8y = mvy&0x07;
 
-    const int cA = (8-d8x)*(8-d8y);
-    const int cB = d8x    *(8-d8y);
-    const int cC = (8-d8x)*d8y;
-    const int cD = d8x    *d8y;
+    int cA = (8-d8x)*(8-d8y);
+    int cB = d8x    *(8-d8y);
+    int cC = (8-d8x)*d8y;
+    int cD = d8x    *d8y;
 
-    src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
-    srcp  = &src[i_src_stride];
+    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
+    srcp = &src[i_src_stride];
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
-        dst[0] = ( cA*src[0] +  cB*src[0+1] +
-                  cC*srcp[0] + cD*srcp[0+1] + 32 ) >> 6;
-        dst[1] = ( cA*src[1] +  cB*src[1+1] +
-                  cC*srcp[1] + cD*srcp[1+1] + 32 ) >> 6;
+        dstu[0] = ( cA*src[0] + cB*src[2] + cC*srcp[0] + cD*srcp[2] + 32 ) >> 6;
+        dstv[0] = ( cA*src[1] + cB*src[3] + cC*srcp[1] + cD*srcp[3] + 32 ) >> 6;
+        dstu[1] = ( cA*src[2] + cB*src[4] + cC*srcp[2] + cD*srcp[4] + 32 ) >> 6;
+        dstv[1] = ( cA*src[3] + cB*src[5] + cC*srcp[3] + cD*srcp[5] + 32 ) >> 6;
 
         src  += i_src_stride;
         srcp += i_src_stride;
-        dst  += i_dst_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
     }
 }
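
mc_chroma_2xh now reads interleaved (NV12-style) chroma, which is why the plane offset gains a *2 and the horizontal neighbour of src[0] is src[2] rather than src[1]. The four bilinear weights always sum to 64, so the + 32 >> 6 is a round-to-nearest divide by 64; a tiny self-check, as a sketch:

    #include <assert.h>

    static void chroma_weights_sum_to_64( int mvx, int mvy )
    {
        int d8x = mvx&7, d8y = mvy&7;
        /* (8-d8x+d8x) * (8-d8y+d8y) == 64 for every subpel offset */
        assert( (8-d8x)*(8-d8y) + d8x*(8-d8y) + (8-d8x)*d8y + d8x*d8y == 64 );
    }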
 
+#ifdef WORDS_BIGENDIAN
+#define VSLD(a,b,n) vec_sld(a,b,n)
+#else
+#define VSLD(a,b,n) vec_sld(b,a,16-n)
+#endif
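
vec_sld concatenates its operands in big-endian byte order, so on little-endian targets the same element-order result is obtained by swapping the operands and shifting by 16-n; VSLD hides that. In element terms, on either endianness:

    /* VSLD(a, b, n) == { a[n], a[n+1], ..., a[15], b[0], ..., b[n-1] }
     * viewed in element order; bare vec_sld only behaves this way on
     * big-endian. */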
 
-#define DO_PROCESS_W4( a )  \
-    dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A );   \
-    dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )
-
-static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
-                                   uint8_t *src, int i_src_stride,
-                                   int mvx, int mvy,
-                                   int i_height )
+static void mc_chroma_altivec_4xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                                   uint8_t *src, intptr_t i_src_stride,
+                                   int mvx, int mvy, int i_height )
 {
     uint8_t *srcp;
-    int y;
     int d8x = mvx & 0x07;
     int d8y = mvy & 0x07;
 
@@ -309,19 +311,25 @@ static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
     coeff[2] = (8-d8x)*d8y;
     coeff[3] = d8x    *d8y;
 
-    src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
-    srcp  = &src[i_src_stride];
+    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
+    srcp = &src[i_src_stride];
 
     LOAD_ZERO;
     PREP_LOAD;
     PREP_LOAD_SRC( src );
     vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
-    vec_u8_t    src2v_8A, dstv_8A;
-    vec_u8_t    src2v_8B, dstv_8B;
-    vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
-    vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
+    vec_u8_t    src2v_8, dstuv, dstvv;
+    vec_u16_t   src0v_16, src1v_16, src2v_16, src3v_16, dstv16;
     vec_u16_t   shiftv, k32v;
 
+#ifdef WORDS_BIGENDIAN
+    static const vec_u8_t perm0v = CV(1,5,9,13,1,5,9,13,1,5,9,13,1,5,9,13);
+    static const vec_u8_t perm1v = CV(3,7,11,15,3,7,11,15,3,7,11,15,3,7,11,15);
+#else
+    static const vec_u8_t perm0v = CV(0,4,8,12,0,4,8,12,0,4,8,12,0,4,8,12);
+    static const vec_u8_t perm1v = CV(2,6,10,14,2,6,10,14,2,6,10,14,2,6,10,14);
+#endif
+
     coeff0v = vec_ld( 0, coeff );
     coeff3v = vec_splat( coeff0v, 3 );
     coeff2v = vec_splat( coeff0v, 2 );
@@ -330,57 +338,63 @@ static void mc_chroma_altivec_4xh( uint8_t *dst, int i_dst_stride,
     k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
     shiftv  = vec_splat_u16( 6 );
 
-    VEC_LOAD( src, src2v_8B, 5, vec_u8_t, src );
-    src2v_16B = vec_u8_to_u16( src2v_8B );
-    src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );
+    VEC_LOAD( src, src2v_8, 9, vec_u8_t, src );
+    src2v_16 = vec_u8_to_u16( src2v_8 );
+    src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
 
-    for( y = 0; y < i_height; y+=2 )
+    for( int y = 0; y < i_height; y += 2 )
     {
-        src0v_16A = src2v_16B;
-        src1v_16A = src3v_16B;
+        src0v_16 = src2v_16;
+        src1v_16 = src3v_16;
+        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
+        src2v_16 = vec_u8_to_u16( src2v_8 );
+        src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
+
+        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
+        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
+        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
+        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
+
+        dstv16 = vec_sr( dstv16, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
+        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
+        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
 
-        VEC_LOAD_G( srcp, src2v_8A, 5, vec_u8_t );
         srcp += i_src_stride;
-        VEC_LOAD_G( srcp, src2v_8B, 5, vec_u8_t );
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
+
+        src0v_16 = src2v_16;
+        src1v_16 = src3v_16;
+        VEC_LOAD( srcp, src2v_8, 9, vec_u8_t, src );
+        src2v_16 = vec_u8_to_u16( src2v_8 );
+        src3v_16 = vec_u8_to_u16( VSLD( src2v_8, src2v_8, 2 ) );
+
+        dstv16 = vec_mladd( coeff0v, src0v_16, k32v );
+        dstv16 = vec_mladd( coeff1v, src1v_16, dstv16 );
+        dstv16 = vec_mladd( coeff2v, src2v_16, dstv16 );
+        dstv16 = vec_mladd( coeff3v, src3v_16, dstv16 );
+
+        dstv16 = vec_sr( dstv16, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv16, dstv16, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv16, dstv16, perm1v );
+        vec_ste( (vec_u32_t)dstuv, 0, (uint32_t*) dstu );
+        vec_ste( (vec_u32_t)dstvv, 0, (uint32_t*) dstv );
+
         srcp += i_src_stride;
-        src2v_16A = vec_u8_to_u16( src2v_8A );
-        src2v_16B = vec_u8_to_u16( src2v_8B );
-        src3v_16A = vec_sld( src2v_16A, src2v_16A, 2 );
-        src3v_16B = vec_sld( src2v_16B, src2v_16B, 2 );
-
-        src0v_16B = src2v_16A;
-        src1v_16B = src3v_16A;
-
-        dstv_16A = dstv_16B = k32v;
-        DO_PROCESS_W4( 0 );
-        DO_PROCESS_W4( 1 );
-        DO_PROCESS_W4( 2 );
-        DO_PROCESS_W4( 3 );
-
-        dstv_16A = vec_sr( dstv_16A, shiftv );
-        dstv_16B = vec_sr( dstv_16B, shiftv );
-        dstv_8A  = vec_u16_to_u8( dstv_16A );
-        dstv_8B  = vec_u16_to_u8( dstv_16B );
-        vec_ste( vec_splat( (vec_u32_t) dstv_8A, 0 ), 0, (uint32_t*) dst );
-        dst += i_dst_stride;
-        vec_ste( vec_splat( (vec_u32_t) dstv_8B, 0 ), 0, (uint32_t*) dst );
-        dst += i_dst_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
     }
 }
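
After the mladd/shift pipeline, each 16-bit lane of dstv16 holds one interpolated sample from the interleaved U/V input, so perm0v gathers the U lanes and perm1v the V lanes and a single pass fills both planes. The index tables differ per endianness because the low byte of a u16 lane sits at byte offset 1 on big-endian but offset 0 on little-endian; illustratively:

    /* big-endian: the 8 u16 lanes are {u0,v0,u1,v1,u2,v2,u3,v3}, low bytes at
     * offsets 1,3,5,...; perm0v = {1,5,9,13,...} extracts u0..u3 and
     * perm1v = {3,7,11,15,...} extracts v0..v3, repeated to fill the vector
     * feeding the 4-byte vec_ste store. */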
 
-#define DO_PROCESS_W8( a )  \
-    src##a##v_16A = vec_u8_to_u16( src##a##v_8A );  \
-    src##a##v_16B = vec_u8_to_u16( src##a##v_8B );  \
-    dstv_16A = vec_mladd( src##a##v_16A, coeff##a##v, dstv_16A );   \
-    dstv_16B = vec_mladd( src##a##v_16B, coeff##a##v, dstv_16B )
-
-static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
-                                   uint8_t *src, int i_src_stride,
-                                   int mvx, int mvy,
-                                   int i_height )
+static void mc_chroma_altivec_8xh( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                                   uint8_t *src, intptr_t i_src_stride,
+                                   int mvx, int mvy, int i_height )
 {
     uint8_t *srcp;
-    int y;
     int d8x = mvx & 0x07;
     int d8y = mvy & 0x07;
 
@@ -390,18 +404,18 @@ static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
     coeff[2] = (8-d8x)*d8y;
     coeff[3] = d8x    *d8y;
 
-    src  += (mvy >> 3) * i_src_stride + (mvx >> 3);
-    srcp  = &src[i_src_stride];
+    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
+    srcp = &src[i_src_stride];
 
     LOAD_ZERO;
     PREP_LOAD;
     PREP_LOAD_SRC( src );
     PREP_STORE8;
     vec_u16_t   coeff0v, coeff1v, coeff2v, coeff3v;
-    vec_u8_t    src0v_8A, src1v_8A, src2v_8A, src3v_8A, dstv_8A;
-    vec_u8_t    src0v_8B, src1v_8B, src2v_8B, src3v_8B, dstv_8B;
-    vec_u16_t   src0v_16A, src1v_16A, src2v_16A, src3v_16A, dstv_16A;
-    vec_u16_t   src0v_16B, src1v_16B, src2v_16B, src3v_16B, dstv_16B;
+    vec_u8_t    src0v_8, src1v_8, src2v_8, src3v_8;
+    vec_u8_t    dstuv, dstvv;
+    vec_u16_t   src0v_16h, src1v_16h, src2v_16h, src3v_16h, dstv_16h;
+    vec_u16_t   src0v_16l, src1v_16l, src2v_16l, src3v_16l, dstv_16l;
     vec_u16_t   shiftv, k32v;
 
     coeff0v = vec_ld( 0, coeff );
@@ -412,60 +426,111 @@ static void mc_chroma_altivec_8xh( uint8_t *dst, int i_dst_stride,
     k32v    = vec_sl( vec_splat_u16( 1 ), vec_splat_u16( 5 ) );
     shiftv  = vec_splat_u16( 6 );
 
-    VEC_LOAD( src, src2v_8B, 9, vec_u8_t, src );
-    src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );
+#ifdef WORDS_BIGENDIAN
+    static const vec_u8_t perm0v = CV(1,5,9,13,17,21,25,29,0,0,0,0,0,0,0,0);
+    static const vec_u8_t perm1v = CV(3,7,11,15,19,23,27,31,0,0,0,0,0,0,0,0);
+#else
+    static const vec_u8_t perm0v = CV(0,4,8,12,16,20,24,28,1,1,1,1,1,1,1,1);
+    static const vec_u8_t perm1v = CV(2,6,10,14,18,22,26,30,1,1,1,1,1,1,1,1);
+#endif
 
-    for( y = 0; y < i_height; y+=2 )
+    VEC_LOAD( src, src2v_8, 16, vec_u8_t, src );
+    VEC_LOAD( src+16, src3v_8, 2, vec_u8_t, src );
+    src3v_8 = VSLD( src2v_8, src3v_8, 2 );
+
+    for( int y = 0; y < i_height; y += 2 )
     {
-        src0v_8A = src2v_8B;
-        src1v_8A = src3v_8B;
+        src0v_8 = src2v_8;
+        src1v_8 = src3v_8;
+        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
+        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
+
+        src3v_8 = VSLD( src2v_8, src3v_8, 2 );
+
+        src0v_16h = vec_u8_to_u16_h( src0v_8 );
+        src0v_16l = vec_u8_to_u16_l( src0v_8 );
+        src1v_16h = vec_u8_to_u16_h( src1v_8 );
+        src1v_16l = vec_u8_to_u16_l( src1v_8 );
+        src2v_16h = vec_u8_to_u16_h( src2v_8 );
+        src2v_16l = vec_u8_to_u16_l( src2v_8 );
+        src3v_16h = vec_u8_to_u16_h( src3v_8 );
+        src3v_16l = vec_u8_to_u16_l( src3v_8 );
+
+        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
+        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
+        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
+
+        dstv_16h = vec_sr( dstv_16h, shiftv );
+        dstv_16l = vec_sr( dstv_16l, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
+
+        VEC_STORE8( dstuv, dstu );
+        VEC_STORE8( dstvv, dstv );
 
-        VEC_LOAD_G( srcp, src2v_8A, 9, vec_u8_t );
         srcp += i_src_stride;
-        VEC_LOAD_G( srcp, src2v_8B, 9, vec_u8_t );
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
+
+        src0v_8 = src2v_8;
+        src1v_8 = src3v_8;
+        VEC_LOAD( srcp, src2v_8, 16, vec_u8_t, src );
+        VEC_LOAD( srcp+16, src3v_8, 2, vec_u8_t, src );
+
+        src3v_8 = VSLD( src2v_8, src3v_8, 2 );
+
+        src0v_16h = vec_u8_to_u16_h( src0v_8 );
+        src0v_16l = vec_u8_to_u16_l( src0v_8 );
+        src1v_16h = vec_u8_to_u16_h( src1v_8 );
+        src1v_16l = vec_u8_to_u16_l( src1v_8 );
+        src2v_16h = vec_u8_to_u16_h( src2v_8 );
+        src2v_16l = vec_u8_to_u16_l( src2v_8 );
+        src3v_16h = vec_u8_to_u16_h( src3v_8 );
+        src3v_16l = vec_u8_to_u16_l( src3v_8 );
+
+        dstv_16h = vec_mladd( coeff0v, src0v_16h, k32v );
+        dstv_16l = vec_mladd( coeff0v, src0v_16l, k32v );
+        dstv_16h = vec_mladd( coeff1v, src1v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff1v, src1v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff2v, src2v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff2v, src2v_16l, dstv_16l );
+        dstv_16h = vec_mladd( coeff3v, src3v_16h, dstv_16h );
+        dstv_16l = vec_mladd( coeff3v, src3v_16l, dstv_16l );
+
+        dstv_16h = vec_sr( dstv_16h, shiftv );
+        dstv_16l = vec_sr( dstv_16l, shiftv );
+
+        dstuv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm0v );
+        dstvv = (vec_u8_t)vec_perm( dstv_16h, dstv_16l, perm1v );
+
+        VEC_STORE8( dstuv, dstu );
+        VEC_STORE8( dstvv, dstv );
+
         srcp += i_src_stride;
-        src3v_8A = vec_sld( src2v_8A, src2v_8A, 1 );
-        src3v_8B = vec_sld( src2v_8B, src2v_8B, 1 );
-
-        src0v_8B = src2v_8A;
-        src1v_8B = src3v_8A;
-        dstv_16A = dstv_16B = k32v;
-        DO_PROCESS_W8( 0 );
-        DO_PROCESS_W8( 1 );
-        DO_PROCESS_W8( 2 );
-        DO_PROCESS_W8( 3 );
-
-        dstv_16A = vec_sr( dstv_16A, shiftv );
-        dstv_16B = vec_sr( dstv_16B, shiftv );
-        dstv_8A  = vec_u16_to_u8( dstv_16A );
-        dstv_8B  = vec_u16_to_u8( dstv_16B );
-        VEC_STORE8( dstv_8A, dst );
-        dst += i_dst_stride;
-        VEC_STORE8( dstv_8B, dst );
-        dst += i_dst_stride;
+        dstu += i_dst_stride;
+        dstv += i_dst_stride;
     }
 }
 
-static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
-                               uint8_t *src, int i_src_stride,
-                               int mvx, int mvy,
-                               int i_width, int i_height )
+static void mc_chroma_altivec( uint8_t *dstu, uint8_t *dstv, intptr_t i_dst_stride,
+                               uint8_t *src, intptr_t i_src_stride,
+                               int mvx, int mvy, int i_width, int i_height )
 {
     if( i_width == 8 )
-    {
-        mc_chroma_altivec_8xh( dst, i_dst_stride, src, i_src_stride,
+        mc_chroma_altivec_8xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                                mvx, mvy, i_height );
-    }
     else if( i_width == 4 )
-    {
-        mc_chroma_altivec_4xh( dst, i_dst_stride, src, i_src_stride,
+        mc_chroma_altivec_4xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                                mvx, mvy, i_height );
-    }
     else
-    {
-        mc_chroma_2xh( dst, i_dst_stride, src, i_src_stride,
+        mc_chroma_2xh( dstu, dstv, i_dst_stride, src, i_src_stride,
                        mvx, mvy, i_height );
-    }
 }
 
 #define HPEL_FILTER_1( t1v, t2v, t3v, t4v, t5v, t6v ) \
@@ -501,11 +566,11 @@ static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
     VEC_LOAD_G( &src[x- 2+i_stride*y], src1v, 16, vec_u8_t); \
     VEC_LOAD_G( &src[x+14+i_stride*y], src6v, 16, vec_u8_t); \
                                                              \
-    src2v = vec_sld( src1v, src6v,  1 );                     \
-    src3v = vec_sld( src1v, src6v,  2 );                     \
-    src4v = vec_sld( src1v, src6v,  3 );                     \
-    src5v = vec_sld( src1v, src6v,  4 );                     \
-    src6v = vec_sld( src1v, src6v,  5 );                     \
+    src2v = VSLD( src1v, src6v,  1 );                        \
+    src3v = VSLD( src1v, src6v,  2 );                        \
+    src4v = VSLD( src1v, src6v,  3 );                        \
+    src5v = VSLD( src1v, src6v,  4 );                        \
+    src6v = VSLD( src1v, src6v,  5 );                        \
                                                              \
     temp1v = vec_u8_to_s16_h( src1v );                       \
     temp2v = vec_u8_to_s16_h( src2v );                       \
@@ -580,12 +645,12 @@ static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
 
 #define HPEL_FILTER_CENTRAL()                           \
 {                                                       \
-    temp1v = vec_sld( tempav, tempbv, 12 );             \
-    temp2v = vec_sld( tempav, tempbv, 14 );             \
+    temp1v = VSLD( tempav, tempbv, 12 );                \
+    temp2v = VSLD( tempav, tempbv, 14 );                \
     temp3v = tempbv;                                    \
-    temp4v = vec_sld( tempbv, tempcv,  2 );             \
-    temp5v = vec_sld( tempbv, tempcv,  4 );             \
-    temp6v = vec_sld( tempbv, tempcv,  6 );             \
+    temp4v = VSLD( tempbv, tempcv,  2 );                \
+    temp5v = VSLD( tempbv, tempcv,  4 );                \
+    temp6v = VSLD( tempbv, tempcv,  6 );                \
                                                         \
     HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                    temp4v, temp5v, temp6v );            \
@@ -593,12 +658,12 @@ static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
     dest1v = vec_add( temp1v, thirtytwov );             \
     dest1v = vec_sra( dest1v, sixv );                   \
                                                         \
-    temp1v = vec_sld( tempbv, tempcv, 12 );             \
-    temp2v = vec_sld( tempbv, tempcv, 14 );             \
+    temp1v = VSLD( tempbv, tempcv, 12 );                \
+    temp2v = VSLD( tempbv, tempcv, 14 );                \
     temp3v = tempcv;                                    \
-    temp4v = vec_sld( tempcv, tempdv,  2 );             \
-    temp5v = vec_sld( tempcv, tempdv,  4 );             \
-    temp6v = vec_sld( tempcv, tempdv,  6 );             \
+    temp4v = VSLD( tempcv, tempdv,  2 );                \
+    temp5v = VSLD( tempcv, tempdv,  4 );                \
+    temp6v = VSLD( tempcv, tempdv,  6 );                \
                                                         \
     HPEL_FILTER_2( temp1v, temp2v, temp3v,              \
                    temp4v, temp5v, temp6v );            \
@@ -612,10 +677,8 @@ static void mc_chroma_altivec( uint8_t *dst, int i_dst_stride,
 }
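
HPEL_FILTER_CENTRAL slides a six-element window across the 16-bit vertically filtered row: tempav..tempdv are consecutive 8-lane blocks, and each VSLD step of 2 bytes moves the window by one s16 lane before the + 32 >> 6 renormalization. Concretely, for the first output vector:

    /* temp1v = lanes [-2..5] of the row   (VSLD(tempav, tempbv, 12))
     * temp2v = lanes [-1..6]              (VSLD(tempav, tempbv, 14))
     * temp3v = lanes [ 0..7]              (tempbv)
     * temp4v = lanes [ 1..8], temp5v = [2..9], temp6v = [3..10]       */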
 
 void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
-                               int i_stride, int i_width, int i_height, int16_t *buf )
+                               intptr_t i_stride, int i_width, int i_height, int16_t *buf )
 {
-    int x, y;
-
     vec_u8_t destv;
     vec_u8_t src1v, src2v, src3v, src4v, src5v, src6v;
     vec_s16_t dest1v, dest2v;
@@ -645,9 +708,9 @@ void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint
     temp_u.s[0]=32;
     thirtytwov = (vec_s16_t)vec_splat( temp_u.v, 0 );
 
-    for( y = 0; y < i_height; y++ )
+    for( int y = 0; y < i_height; y++ )
     {
-        x = 0;
+        int x = 0;
 
         /* horizontal_filter */
         HPEL_FILTER_HORIZONTAL();
@@ -695,8 +758,7 @@ void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint
         temp5v = vec_u8_to_s16_h( src5v );
         temp6v = vec_u8_to_s16_h( src6v );
 
-        HPEL_FILTER_1( temp1v, temp2v, temp3v,
-                       temp4v, temp5v, temp6v );
+        HPEL_FILTER_1( temp1v, temp2v, temp3v, temp4v, temp5v, temp6v );
 
         /* central_filter */
         tempav = tempcv;
@@ -710,18 +772,21 @@ void x264_hpel_filter_altivec( uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint
 }
 
 static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_t *dsth, uint8_t *dstv, uint8_t *dstc,
-                                           int src_stride, int dst_stride, int width, int height )
+                                            intptr_t src_stride, intptr_t dst_stride, int width, int height )
 {
-    int w = width/16;
+    int w = width >> 4;
     int end = (width & 15);
-    int x, y;
     vec_u8_t src0v, src1v, src2v;
     vec_u8_t lv, hv, src1p1v;
     vec_u8_t avg0v, avg1v, avghv, avghp1v, avgleftv, avgrightv;
     static const vec_u8_t inverse_bridge_shuffle = CV(0x00, 0x02, 0x04, 0x06, 0x08, 0x0A, 0x0C, 0x0E, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x1E );
+#ifndef WORDS_BIGENDIAN
+    static const vec_u8_t inverse_bridge_shuffle_1 = CV(0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F, 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F );
+#endif
 
-    for( y=0; y<height; y++ )
+    for( int y = 0; y < height; y++ )
     {
+        int x;
         uint8_t *src1 = src0+src_stride;
         uint8_t *src2 = src1+src_stride;
 
@@ -732,7 +797,7 @@ static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_
         avg0v = vec_avg(src0v, src1v);
         avg1v = vec_avg(src1v, src2v);
 
-        for( x=0; x<w; x++ )
+        for( x = 0; x < w; x++ )
         {
             lv = vec_ld(16*(x*2+1), src0);
             src1v = vec_ld(16*(x*2+1), src1);
@@ -742,11 +807,15 @@ static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_
             src1p1v = vec_ld(16*(x*2+2), src1);
             avghp1v = vec_avg(lv, src1p1v);
 
-            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
-            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
+            avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
+            avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
 
             vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dst0);
+#ifdef WORDS_BIGENDIAN
             vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dsth);
+#else
+            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dsth);
+#endif
 
             avg0v = avghp1v;
 
@@ -756,16 +825,20 @@ static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_
             hv = vec_ld(16*(x*2+2), src2);
             avghp1v = vec_avg(src1p1v, hv);
 
-            avgleftv = vec_avg(vec_sld(avg1v, avghv, 1), avg1v);
-            avgrightv = vec_avg(vec_sld(avghv, avghp1v, 1), avghv);
+            avgleftv = vec_avg(VSLD(avg1v, avghv, 1), avg1v);
+            avgrightv = vec_avg(VSLD(avghv, avghp1v, 1), avghv);
 
             vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle), 16*x, dstv);
+#ifdef WORDS_BIGENDIAN
             vec_st((vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv), 16*x, dstc);
+#else
+            vec_st(vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1), 16*x, dstc);
+#endif
 
             avg1v = avghp1v;
 
         }
-        if (end)
+        if( end )
         {
             lv = vec_ld(16*(x*2+1), src0);
             src1v = vec_ld(16*(x*2+1), src1);
@@ -774,11 +847,15 @@ static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_
             lv = vec_ld(16*(x*2+1), src2);
             avghp1v = vec_avg(src1v, lv);
 
-            avgleftv = vec_avg(vec_sld(avg0v, avghv, 1), avg0v);
-            avgrightv = vec_avg(vec_sld(avg1v, avghp1v, 1), avg1v);
+            avgleftv = vec_avg(VSLD(avg0v, avghv, 1), avg0v);
+            avgrightv = vec_avg(VSLD(avg1v, avghp1v, 1), avg1v);
 
             lv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle);
+#ifdef WORDS_BIGENDIAN
             hv = (vec_u8_t)vec_pack((vec_u16_t)avgleftv,(vec_u16_t)avgrightv);
+#else
+            hv = vec_perm(avgleftv, avgrightv, inverse_bridge_shuffle_1);
+#endif
 
             vec_ste((vec_u32_t)lv,16*x,(uint32_t*)dst0);
             vec_ste((vec_u32_t)lv,16*x+4,(uint32_t*)dst0);
@@ -802,8 +879,322 @@ static void frame_init_lowres_core_altivec( uint8_t *src0, uint8_t *dst0, uint8_
     }
 }
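
frame_init_lowres_core builds four half-resolution planes in one pass: dst0 aligned to even pixels, and dsth/dstv/dstc offset by half a pixel horizontally, vertically, or both. Per output pixel, the vec_avg/VSLD pipeline above computes the same cascade of rounding averages as x264's scalar C version, sketched here:

    /* with src1 = src0 + src_stride and src2 = src1 + src_stride: */
    #define FILTER(a,b,c,d) (( ((a+b+1)>>1) + ((c+d+1)>>1) + 1 ) >> 1)
    /* dst0[x] = FILTER( src0[2*x  ], src1[2*x  ], src0[2*x+1], src1[2*x+1] );
       dsth[x] = FILTER( src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2] );
       dstv[x] = FILTER( src1[2*x  ], src2[2*x  ], src1[2*x+1], src2[2*x+1] );
       dstc[x] = FILTER( src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2] ); */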
 
+static void mc_weight_w2_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                  const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    vec_u8_t srcv;
+    vec_s16_t weightv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, roundv );
+            weightv = vec_sra( weightv, (vec_u16_t)denomv );
+            weightv = vec_add( weightv, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 2, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u16_t)srcv, 0 ), 0, (uint16_t*)dst );
+        }
+    }
+}
+static void mc_weight_w4_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                  const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    vec_u8_t srcv;
+    vec_s16_t weightv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, roundv );
+            weightv = vec_sra( weightv, (vec_u16_t)denomv );
+            weightv = vec_add( weightv, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 4, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            vec_ste( vec_splat( (vec_u32_t)srcv, 0 ), 0, (uint32_t*)dst );
+        }
+    }
+}
+static void mc_weight_w8_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                  const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    PREP_STORE8;
+    vec_u8_t srcv;
+    vec_s16_t weightv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, roundv );
+            weightv = vec_sra( weightv, (vec_u16_t)denomv );
+            weightv = vec_add( weightv, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            VEC_STORE8( srcv, dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 8, vec_u8_t, src );
+            weightv = vec_u8_to_s16( srcv );
+
+            weightv = vec_mladd( weightv, scalev, offsetv );
+
+            srcv = vec_packsu( weightv, zero_s16v );
+            VEC_STORE8( srcv, dst );
+        }
+    }
+}
+static void mc_weight_w16_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                   const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD;
+    PREP_LOAD_SRC( src );
+    vec_u8_t srcv;
+    vec_s16_t weight_lv, weight_hv;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
+            weight_hv = vec_u8_to_s16_h( srcv );
+            weight_lv = vec_u8_to_s16_l( srcv );
+
+            weight_hv = vec_mladd( weight_hv, scalev, roundv );
+            weight_lv = vec_mladd( weight_lv, scalev, roundv );
+            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
+            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
+            weight_hv = vec_add( weight_hv, offsetv );
+            weight_lv = vec_add( weight_lv, offsetv );
+
+            srcv = vec_packsu( weight_hv, weight_lv );
+            vec_st( srcv, 0, dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            VEC_LOAD( src, srcv, 16, vec_u8_t, src );
+            weight_hv = vec_u8_to_s16_h( srcv );
+            weight_lv = vec_u8_to_s16_l( srcv );
+
+            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
+            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
+
+            srcv = vec_packsu( weight_hv, weight_lv );
+            vec_st( srcv, 0, dst );
+        }
+    }
+}
+static void mc_weight_w20_altivec( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
+                                   const x264_weight_t *weight, int i_height )
+{
+    LOAD_ZERO;
+    PREP_LOAD_SRC( src );
+    vec_u8_t src_1v, src_2v, src_3v;
+    vec_s16_t weight_lv, weight_hv, weight_3v;
+    vec_s16_t scalev, offsetv, denomv, roundv;
+    vec_s16_u loadv;
+
+    int denom = weight->i_denom;
+
+    loadv.s[0] = weight->i_scale;
+    scalev = vec_splat( loadv.v, 0 );
+
+    loadv.s[0] = weight->i_offset;
+    offsetv = vec_splat( loadv.v, 0 );
+
+    if( denom >= 1 )
+    {
+        loadv.s[0] = denom;
+        denomv = vec_splat( loadv.v, 0 );
+
+        loadv.s[0] = 1<<(denom - 1);
+        roundv = vec_splat( loadv.v, 0 );
+
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            src_1v = vec_ld( 0,  src );
+            src_2v = vec_ld( 16, src );
+            src_3v = vec_ld( 19, src );
+            src_1v = vec_perm( src_1v, src_2v, _src_ );
+            src_3v = vec_perm( src_2v, src_3v, _src_ );
+            weight_hv = vec_u8_to_s16_h( src_1v );
+            weight_lv = vec_u8_to_s16_l( src_1v );
+            weight_3v = vec_u8_to_s16_h( src_3v );
+
+            weight_hv = vec_mladd( weight_hv, scalev, roundv );
+            weight_lv = vec_mladd( weight_lv, scalev, roundv );
+            weight_3v = vec_mladd( weight_3v, scalev, roundv );
+            weight_hv = vec_sra( weight_hv, (vec_u16_t)denomv );
+            weight_lv = vec_sra( weight_lv, (vec_u16_t)denomv );
+            weight_3v = vec_sra( weight_3v, (vec_u16_t)denomv );
+            weight_hv = vec_add( weight_hv, offsetv );
+            weight_lv = vec_add( weight_lv, offsetv );
+            weight_3v = vec_add( weight_3v, offsetv );
+
+            src_1v = vec_packsu( weight_hv, weight_lv );
+            src_3v = vec_packsu( weight_3v, zero_s16v );
+            vec_st( src_1v, 0, dst );
+            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
+        }
+    }
+    else
+    {
+        for( int y = 0; y < i_height; y++, dst += i_dst, src += i_src )
+        {
+            src_1v = vec_ld( 0,  src );
+            src_2v = vec_ld( 16, src );
+            src_3v = vec_ld( 19, src );
+            src_1v = vec_perm( src_1v, src_2v, _src_ );
+            src_3v = vec_perm( src_2v, src_3v, _src_ );
+            weight_hv = vec_u8_to_s16_h( src_1v );
+            weight_lv = vec_u8_to_s16_l( src_1v );
+            weight_3v = vec_u8_to_s16_h( src_3v );
+
+            weight_hv = vec_mladd( weight_hv, scalev, offsetv );
+            weight_lv = vec_mladd( weight_lv, scalev, offsetv );
+            weight_3v = vec_mladd( weight_3v, scalev, offsetv );
+
+            src_1v = vec_packsu( weight_hv, weight_lv );
+            src_3v = vec_packsu( weight_3v, zero_s16v );
+            vec_st( src_1v, 0, dst );
+            vec_ste( (vec_u32_t)src_3v, 16, (uint32_t*)dst );
+        }
+    }
+}
+
+static weight_fn_t x264_mc_weight_wtab_altivec[6] =
+{
+    mc_weight_w2_altivec,
+    mc_weight_w4_altivec,
+    mc_weight_w8_altivec,
+    mc_weight_w16_altivec,
+    mc_weight_w16_altivec,
+    mc_weight_w20_altivec,
+};
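
All five widths implement the same explicit weighted-prediction formula: scale, round by 1 << (denom-1), shift, add the offset, then saturate to 8 bits; when denom is 0 the round/shift pair drops out, which is why each function carries two loops. A scalar sketch of what every vector path computes:

    static void mc_weight_sketch( uint8_t *dst, intptr_t i_dst, uint8_t *src, intptr_t i_src,
                                  const x264_weight_t *w, int width, int height )
    {
        int round = w->i_denom ? 1 << (w->i_denom - 1) : 0;
        for( int y = 0; y < height; y++, dst += i_dst, src += i_src )
            for( int x = 0; x < width; x++ )
            {
                int v = (( src[x] * w->i_scale + round ) >> w->i_denom) + w->i_offset;
                dst[x] = v < 0 ? 0 : v > 255 ? 255 : v; /* vec_packsu saturates the same way */
            }
    }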
+
+#endif // !HIGH_BIT_DEPTH
+
 void x264_mc_altivec_init( x264_mc_functions_t *pf )
 {
+#if !HIGH_BIT_DEPTH
     pf->mc_luma   = mc_luma_altivec;
     pf->get_ref   = get_ref_altivec;
     pf->mc_chroma = mc_chroma_altivec;
@@ -813,4 +1204,7 @@ void x264_mc_altivec_init( x264_mc_functions_t *pf )
 
     pf->hpel_filter = x264_hpel_filter_altivec;
     pf->frame_init_lowres_core = frame_init_lowres_core_altivec;
+
+    pf->weight = x264_mc_weight_wtab_altivec;
+#endif // !HIGH_BIT_DEPTH
 }
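
For context, these overrides take effect when the generic dispatcher detects AltiVec; the hookup in common/mc.c looks roughly like this (a sketch from the surrounding x264 tree, not part of this diff):

    #if HAVE_ALTIVEC
        if( cpu&X264_CPU_ALTIVEC )
            x264_mc_altivec_init( pf );
    #endif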