Bump dates to 2016

diff --git a/common/ppc/dct.c b/common/ppc/dct.c
index 87aab07df48967943a8ef1842a013e6d2abb7721..901659e71069f5c2994a0e553c81f832c40f61ea 100644
--- a/common/ppc/dct.c
+++ b/common/ppc/dct.c
@@ -1,11 +1,10 @@
 /*****************************************************************************
- * dct.c: h264 encoder library
+ * dct.c: ppc transform and zigzag
  *****************************************************************************
- * Copyright (C) 2003 Laurent Aimar
- * $Id$
+ * Copyright (C) 2003-2016 x264 project
  *
- * Authors: Eric Petit <titer@m0k.org>
- *          Guillaume Poirier <gpoirier@mplayerhq.hu>
+ * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
+ *          Eric Petit <eric.petit@lapsus.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  *
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
  *****************************************************************************/
 
-#ifdef SYS_LINUX
-#include <altivec.h>
-#endif
-
 #include "common/common.h"
 #include "ppccommon.h"
 
+#if !HIGH_BIT_DEPTH
 #define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
     b1 = vec_add( a0, a3 );              \
     b3 = vec_add( a1, a2 );              \
@@ -41,8 +40,7 @@
     b3 = vec_sub( a0, a1 );              \
     b3 = vec_sub( b3, a1 )
 
-void x264_sub4x4_dct_altivec( int16_t dct[4][4],
-        uint8_t *pix1, uint8_t *pix2 )
+void x264_sub4x4_dct_altivec( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
 {
     PREP_DIFF_8BYTEALIGNED;
     vec_s16_t dct0v, dct1v, dct2v, dct3v;
@@ -60,12 +58,11 @@ void x264_sub4x4_dct_altivec( int16_t dct[4][4],
     permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
     VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
 
-    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,  (int16_t*)dct);
-    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, (int16_t*)dct);
+    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,  dct);
+    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, dct);
 }
 
-void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
-        uint8_t *pix1, uint8_t *pix2 )
+void x264_sub8x8_dct_altivec( int16_t dct[4][16], uint8_t *pix1, uint8_t *pix2 )
 {
     PREP_DIFF_8BYTEALIGNED;
     vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
@@ -94,18 +91,17 @@ void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
     VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
     VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
 
-    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,   (int16_t*)dct);
-    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16,  (int16_t*)dct);
-    vec_st(vec_perm(tmp4v, tmp5v, permHighv), 32,  (int16_t*)dct);
-    vec_st(vec_perm(tmp6v, tmp7v, permHighv), 48,  (int16_t*)dct);
-    vec_st(vec_perm(tmp0v, tmp1v, permLowv),  64,  (int16_t*)dct);
-    vec_st(vec_perm(tmp2v, tmp3v, permLowv),  80,  (int16_t*)dct);
-    vec_st(vec_perm(tmp4v, tmp5v, permLowv),  96,  (int16_t*)dct);
-    vec_st(vec_perm(tmp6v, tmp7v, permLowv),  112, (int16_t*)dct);
+    vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0,   *dct);
+    vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16,  *dct);
+    vec_st(vec_perm(tmp4v, tmp5v, permHighv), 32,  *dct);
+    vec_st(vec_perm(tmp6v, tmp7v, permHighv), 48,  *dct);
+    vec_st(vec_perm(tmp0v, tmp1v, permLowv),  64,  *dct);
+    vec_st(vec_perm(tmp2v, tmp3v, permLowv),  80,  *dct);
+    vec_st(vec_perm(tmp4v, tmp5v, permLowv),  96,  *dct);
+    vec_st(vec_perm(tmp6v, tmp7v, permLowv),  112, *dct);
 }
 
-void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
-        uint8_t *pix1, uint8_t *pix2 ) 
+void x264_sub16x16_dct_altivec( int16_t dct[16][16], uint8_t *pix1, uint8_t *pix2 )
 {
     x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0], &pix2[0] );
     x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8], &pix2[8] );
@@ -175,7 +171,7 @@ void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
 }
 
 
-void x264_sub8x8_dct8_altivec( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
+void x264_sub8x8_dct8_altivec( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
 {
     vec_u16_t onev = vec_splat_u16(1);
     vec_u16_t twov = vec_add( onev, onev );
@@ -209,18 +205,18 @@ void x264_sub8x8_dct8_altivec( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
     DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
                      dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );
 
-    vec_st( dct_tr0v,  0,  (signed short *)dct );
-    vec_st( dct_tr1v, 16,  (signed short *)dct );
-    vec_st( dct_tr2v, 32,  (signed short *)dct );
-    vec_st( dct_tr3v, 48,  (signed short *)dct );
-    
-    vec_st( dct_tr4v, 64,  (signed short *)dct );
-    vec_st( dct_tr5v, 80,  (signed short *)dct );
-    vec_st( dct_tr6v, 96,  (signed short *)dct );
-    vec_st( dct_tr7v, 112, (signed short *)dct );
+    vec_st( dct_tr0v,  0,  dct );
+    vec_st( dct_tr1v, 16,  dct );
+    vec_st( dct_tr2v, 32,  dct );
+    vec_st( dct_tr3v, 48,  dct );
+
+    vec_st( dct_tr4v, 64,  dct );
+    vec_st( dct_tr5v, 80,  dct );
+    vec_st( dct_tr6v, 96,  dct );
+    vec_st( dct_tr7v, 112, dct );
 }
 
-void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 )
+void x264_sub16x16_dct8_altivec( int16_t dct[4][64], uint8_t *pix1, uint8_t *pix2 )
 {
     x264_sub8x8_dct8_altivec( dct[0], &pix1[0],               &pix2[0] );
     x264_sub8x8_dct8_altivec( dct[1], &pix1[8],               &pix2[8] );
@@ -233,6 +229,99 @@ void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *p
  * IDCT transform:
  ****************************************************************************/
 
+#define IDCT_1D_ALTIVEC(s0, s1, s2, s3,  d0, d1, d2, d3) \
+{                                                        \
+    /*        a0  = SRC(0) + SRC(2); */                  \
+    vec_s16_t a0v = vec_add(s0, s2);                     \
+    /*        a1  = SRC(0) - SRC(2); */                  \
+    vec_s16_t a1v = vec_sub(s0, s2);                     \
+    /*        a2  =           (SRC(1)>>1) - SRC(3); */   \
+    vec_s16_t a2v = vec_sub(vec_sra(s1, onev), s3);      \
+    /*        a3  =           (SRC(3)>>1) + SRC(1); */   \
+    vec_s16_t a3v = vec_add(vec_sra(s3, onev), s1);      \
+    /* DST(0,    a0 + a3); */                            \
+    d0 = vec_add(a0v, a3v);                              \
+    /* DST(1,    a1 + a2); */                            \
+    d1 = vec_add(a1v, a2v);                              \
+    /* DST(2,    a1 - a2); */                            \
+    d2 = vec_sub(a1v, a2v);                              \
+    /* DST(3,    a0 - a3); */                            \
+    d3 = vec_sub(a0v, a3v);                              \
+}
+
+#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)             \
+    vdst_orig = vec_ld(0, dst);                      \
+    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
+    vdst_ss = (vec_s16_t)vec_mergeh(zero_u8v, vdst); \
+    va = vec_add(va, vdst_ss);                       \
+    va_u8 = vec_s16_to_u8(va);                       \
+    va_u32 = vec_splat((vec_u32_t)va_u8, 0);         \
+    vec_ste(va_u32, element, (uint32_t*)dst);
+
+#define ALTIVEC_STORE4_SUM_CLIP(dest, idctv, perm_ldv)          \
+{                                                               \
+    /* unaligned load */                                        \
+    vec_u8_t lv = vec_ld(0, dest);                              \
+    vec_u8_t dstv = vec_perm(lv, zero_u8v, (vec_u8_t)perm_ldv); \
+    vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                  \
+    vec_u16_t dst16 = vec_u8_to_u16_h(dstv);                    \
+    vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);   \
+    vec_u8_t idstsum8 = vec_s16_to_u8(idstsum);                 \
+    /* unaligned store */                                       \
+    vec_u32_t bodyv = vec_splat((vec_u32_t)idstsum8, 0);        \
+    int element = ((unsigned long)dest & 0xf) >> 2;             \
+    vec_ste(bodyv, element, (uint32_t *)dest);                  \
+}
+
+void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[16] )
+{
+    vec_u16_t onev = vec_splat_u16(1);
+
+    dct[0] += 32; // rounding for the >>6 at the end
+
+    vec_s16_t s0, s1, s2, s3;
+
+    s0 = vec_ld( 0x00, dct );
+    s1 = vec_sld( s0, s0, 8 );
+    s2 = vec_ld( 0x10, dct );
+    s3 = vec_sld( s2, s2, 8 );
+
+    vec_s16_t d0, d1, d2, d3;
+    IDCT_1D_ALTIVEC( s0, s1, s2, s3, d0, d1, d2, d3 );
+
+    vec_s16_t tr0, tr1, tr2, tr3;
+
+    VEC_TRANSPOSE_4( d0, d1, d2, d3, tr0, tr1, tr2, tr3 );
+
+    vec_s16_t idct0, idct1, idct2, idct3;
+    IDCT_1D_ALTIVEC( tr0, tr1, tr2, tr3, idct0, idct1, idct2, idct3 );
+
+    vec_u8_t perm_ldv = vec_lvsl( 0, dst );
+    vec_u16_t sixv = vec_splat_u16(6);
+    LOAD_ZERO;
+
+    ALTIVEC_STORE4_SUM_CLIP( &dst[0*FDEC_STRIDE], idct0, perm_ldv );
+    ALTIVEC_STORE4_SUM_CLIP( &dst[1*FDEC_STRIDE], idct1, perm_ldv );
+    ALTIVEC_STORE4_SUM_CLIP( &dst[2*FDEC_STRIDE], idct2, perm_ldv );
+    ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3, perm_ldv );
+}
+
+void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][16] )
+{
+    x264_add4x4_idct_altivec( &p_dst[0],               dct[0] );
+    x264_add4x4_idct_altivec( &p_dst[4],               dct[1] );
+    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+0], dct[2] );
+    x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+4], dct[3] );
+}
+
+void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][16] )
+{
+    x264_add8x8_idct_altivec( &p_dst[0],               &dct[0] );
+    x264_add8x8_idct_altivec( &p_dst[8],               &dct[4] );
+    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+0], &dct[8] );
+    x264_add8x8_idct_altivec( &p_dst[8*FDEC_STRIDE+8], &dct[12] );
+}
+
 #define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7)\
 {\
     /*        a0  = SRC(0) + SRC(4); */ \
@@ -295,7 +384,7 @@ void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *p
     vec_u8_t lv = vec_ld( 7, dest );                           \
     vec_u8_t dstv   = vec_perm( hv, lv, (vec_u8_t)perm_ldv );  \
     vec_s16_t idct_sh6 = vec_sra(idctv, sixv);                 \
-    vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv);   \
+    vec_u16_t dst16 = vec_u8_to_u16_h(dstv);                   \
     vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16);  \
     vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum);        \
     /* unaligned store */                                      \
@@ -309,23 +398,23 @@ void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *p
     vec_st( hv, 0, dest );                                     \
 }
 
-void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] )
+void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[64] )
 {
     vec_u16_t onev = vec_splat_u16(1);
     vec_u16_t twov = vec_splat_u16(2);
 
-    dct[0][0] += 32; // rounding for the >>6 at the end
+    dct[0] += 32; // rounding for the >>6 at the end
 
     vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
 
-    s0 = vec_ld(0x00, (int16_t*)dct);
-    s1 = vec_ld(0x10, (int16_t*)dct);
-    s2 = vec_ld(0x20, (int16_t*)dct);
-    s3 = vec_ld(0x30, (int16_t*)dct);
-    s4 = vec_ld(0x40, (int16_t*)dct);
-    s5 = vec_ld(0x50, (int16_t*)dct);
-    s6 = vec_ld(0x60, (int16_t*)dct);
-    s7 = vec_ld(0x70, (int16_t*)dct);
+    s0 = vec_ld(0x00, dct);
+    s1 = vec_ld(0x10, dct);
+    s2 = vec_ld(0x20, dct);
+    s3 = vec_ld(0x30, dct);
+    s4 = vec_ld(0x40, dct);
+    s5 = vec_ld(0x50, dct);
+    s6 = vec_ld(0x60, dct);
+    s7 = vec_ld(0x70, dct);
 
     vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
     IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,  d0, d1, d2, d3, d4, d5, d6, d7);
@@ -355,7 +444,7 @@ void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] )
     ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
 }
 
-void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] )
+void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][64] )
 {
     x264_add8x8_idct8_altivec( &dst[0],               dct[0] );
     x264_add8x8_idct8_altivec( &dst[8],               dct[1] );
@@ -363,3 +452,39 @@ void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] )
     x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
 }
 
+void x264_zigzag_scan_4x4_frame_altivec( int16_t level[16], int16_t dct[16] )
+{
+    vec_s16_t dct0v, dct1v;
+    vec_s16_t tmp0v, tmp1v;
+
+    dct0v = vec_ld(0x00, dct);
+    dct1v = vec_ld(0x10, dct);
+
+    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,8,9,2,3,4,5,10,11,16,17,24,25,18,19);
+    const vec_u8_t sel1 = (vec_u8_t) CV(12,13,6,7,14,15,20,21,26,27,28,29,22,23,30,31);
+
+    tmp0v = vec_perm( dct0v, dct1v, sel0 );
+    tmp1v = vec_perm( dct0v, dct1v, sel1 );
+
+    vec_st( tmp0v, 0x00, level );
+    vec_st( tmp1v, 0x10, level );
+}
+
+void x264_zigzag_scan_4x4_field_altivec( int16_t level[16], int16_t dct[16] )
+{
+    vec_s16_t dct0v, dct1v;
+    vec_s16_t tmp0v, tmp1v;
+
+    dct0v = vec_ld(0x00, dct);
+    dct1v = vec_ld(0x10, dct);
+
+    const vec_u8_t sel0 = (vec_u8_t) CV(0,1,2,3,8,9,4,5,6,7,10,11,12,13,14,15);
+
+    tmp0v = vec_perm( dct0v, dct1v, sel0 );
+    tmp1v = dct1v;
+
+    vec_st( tmp0v, 0x00, level );
+    vec_st( tmp1v, 0x10, level );
+}
+#endif // !HIGH_BIT_DEPTH
+
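
For readers tracing the vector code above: the butterfly spelled out in the comments of IDCT_1D_ALTIVEC (a0 = SRC(0)+SRC(2), a1 = SRC(0)-SRC(2), a2 = (SRC(1)>>1)-SRC(3), a3 = (SRC(3)>>1)+SRC(1)) is the per-lane computation behind x264_add4x4_idct_altivec, which runs it twice with a transpose in between, together with the dct[0] += 32 rounding bias and the final >>6, add-to-prediction and saturation done in ALTIVEC_STORE4_SUM_CLIP. Below is a minimal scalar sketch of that flow, not code from the patch: the helper names, the FDEC_STRIDE value, and the raster-order coefficient layout are assumptions for illustration, and it is not claimed to be bit-exact with the AltiVec routine's internal pass order and layout.

#include <stdint.h>

#define FDEC_STRIDE 32 /* assumed: x264's decoded-frame stride in 8-bit builds */

static uint8_t clip_uint8( int x ) /* hypothetical helper: clip to the 8-bit pixel range */
{
    return x < 0 ? 0 : x > 255 ? 255 : (uint8_t)x;
}

static void add4x4_idct_ref( uint8_t *dst, int16_t dct[16] ) /* illustrative scalar reference */
{
    int tmp[16], d[16];

    dct[0] += 32;                           /* rounding for the final >>6 */

    for( int j = 0; j < 4; j++ )            /* first 1D pass, down each column */
    {
        int a0 =  dct[0*4+j]       +  dct[2*4+j];
        int a1 =  dct[0*4+j]       -  dct[2*4+j];
        int a2 = (dct[1*4+j] >> 1) -  dct[3*4+j];
        int a3 = (dct[3*4+j] >> 1) +  dct[1*4+j];
        tmp[0*4+j] = a0 + a3;
        tmp[1*4+j] = a1 + a2;
        tmp[2*4+j] = a1 - a2;
        tmp[3*4+j] = a0 - a3;
    }
    for( int i = 0; i < 4; i++ )            /* second 1D pass, along each row */
    {
        int a0 =  tmp[i*4+0]       +  tmp[i*4+2];
        int a1 =  tmp[i*4+0]       -  tmp[i*4+2];
        int a2 = (tmp[i*4+1] >> 1) -  tmp[i*4+3];
        int a3 = (tmp[i*4+3] >> 1) +  tmp[i*4+1];
        d[i*4+0] = a0 + a3;
        d[i*4+1] = a1 + a2;
        d[i*4+2] = a1 - a2;
        d[i*4+3] = a0 - a3;
    }
    for( int y = 0; y < 4; y++ )            /* >>6, add to prediction, saturate to 8 bits */
        for( int x = 0; x < 4; x++ )
            dst[y*FDEC_STRIDE+x] = clip_uint8( dst[y*FDEC_STRIDE+x] + (d[y*4+x] >> 6) );
}

Similarly, reading the sel0/sel1 byte selectors in x264_zigzag_scan_4x4_frame_altivec as pairs (each byte pair picks one 16-bit coefficient from the concatenation of dct0v and dct1v) gives the scan order {0,4,1,2,5,8,12,9,6,3,7,10,13,14,11,15}, i.e. level[i] = dct[scan[i]] for the frame zigzag.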