/*****************************************************************************
- * dct.c: h264 encoder library
+ * dct.c: ppc transform and zigzag
*****************************************************************************
- * Copyright (C) 2003 Laurent Aimar
- * $Id$
+ * Copyright (C) 2003-2016 x264 project
*
- * Authors: Eric Petit <titer@m0k.org>
- * Guillaume Poirier <gpoirier@mplayerhq.hu>
+ * Authors: Guillaume Poirier <gpoirier@mplayerhq.hu>
+ * Eric Petit <eric.petit@lapsus.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
-#ifdef SYS_LINUX
-#include <altivec.h>
-#endif
-
#include "common/common.h"
#include "ppccommon.h"
+#if !HIGH_BIT_DEPTH
#define VEC_DCT(a0,a1,a2,a3,b0,b1,b2,b3) \
b1 = vec_add( a0, a3 ); \
b3 = vec_add( a1, a2 ); \
b3 = vec_sub( a0, a1 ); \
b3 = vec_sub( b3, a1 )
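/* Annotation (not part of the patch; names are illustrative): the diff
 * context elides the middle of VEC_DCT, but the full butterfly computes the
 * H.264 forward 4x4 transform of one row/column. A scalar sketch of what one
 * pass produces from inputs a0..a3: */
static inline void dct4x4_1d_sketch( int a0, int a1, int a2, int a3, int b[4] )
{
    int s03 = a0 + a3;      /* b1 = vec_add( a0, a3 ) */
    int s12 = a1 + a2;      /* b3 = vec_add( a1, a2 ) */
    int d03 = a0 - a3;
    int d12 = a1 - a2;
    b[0] = s03 + s12;
    b[1] = 2*d03 + d12;
    b[2] = s03 - s12;
    b[3] = d03 - 2*d12;     /* b3 = vec_sub( b3, a1 ) after b3 = a0 - a1 */
}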
-void x264_sub4x4_dct_altivec( int16_t dct[4][4],
- uint8_t *pix1, uint8_t *pix2 )
+void x264_sub4x4_dct_altivec( int16_t dct[16], uint8_t *pix1, uint8_t *pix2 )
{
PREP_DIFF_8BYTEALIGNED;
vec_s16_t dct0v, dct1v, dct2v, dct3v;
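/* permHighv concatenates the first four int16 coefficients of each operand
 * (bytes 0x00-0x07 and 0x10-0x17) into a single aligned 16-byte store */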
permHighv = (vec_u8_t) CV(0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17);
VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
- vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0, (int16_t*)dct);
- vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, (int16_t*)dct);
+ vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0, dct);
+ vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, dct);
}
-void x264_sub8x8_dct_altivec( int16_t dct[4][4][4],
- uint8_t *pix1, uint8_t *pix2 )
+void x264_sub8x8_dct_altivec( int16_t dct[4][16], uint8_t *pix1, uint8_t *pix2 )
{
PREP_DIFF_8BYTEALIGNED;
vec_s16_t dct0v, dct1v, dct2v, dct3v, dct4v, dct5v, dct6v, dct7v;
VEC_DCT( dct0v, dct1v, dct2v, dct3v, tmp0v, tmp1v, tmp2v, tmp3v );
VEC_DCT( dct4v, dct5v, dct6v, dct7v, tmp4v, tmp5v, tmp6v, tmp7v );
- vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0, (int16_t*)dct);
- vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, (int16_t*)dct);
- vec_st(vec_perm(tmp4v, tmp5v, permHighv), 32, (int16_t*)dct);
- vec_st(vec_perm(tmp6v, tmp7v, permHighv), 48, (int16_t*)dct);
- vec_st(vec_perm(tmp0v, tmp1v, permLowv), 64, (int16_t*)dct);
- vec_st(vec_perm(tmp2v, tmp3v, permLowv), 80, (int16_t*)dct);
- vec_st(vec_perm(tmp4v, tmp5v, permLowv), 96, (int16_t*)dct);
- vec_st(vec_perm(tmp6v, tmp7v, permLowv), 112, (int16_t*)dct);
+ vec_st(vec_perm(tmp0v, tmp1v, permHighv), 0, *dct);
+ vec_st(vec_perm(tmp2v, tmp3v, permHighv), 16, *dct);
+ vec_st(vec_perm(tmp4v, tmp5v, permHighv), 32, *dct);
+ vec_st(vec_perm(tmp6v, tmp7v, permHighv), 48, *dct);
+ vec_st(vec_perm(tmp0v, tmp1v, permLowv), 64, *dct);
+ vec_st(vec_perm(tmp2v, tmp3v, permLowv), 80, *dct);
+ vec_st(vec_perm(tmp4v, tmp5v, permLowv), 96, *dct);
+ vec_st(vec_perm(tmp6v, tmp7v, permLowv), 112, *dct);
}
-void x264_sub16x16_dct_altivec( int16_t dct[16][4][4],
- uint8_t *pix1, uint8_t *pix2 )
+void x264_sub16x16_dct_altivec( int16_t dct[16][16], uint8_t *pix1, uint8_t *pix2 )
{
x264_sub8x8_dct_altivec( &dct[ 0], &pix1[0], &pix2[0] );
x264_sub8x8_dct_altivec( &dct[ 4], &pix1[8], &pix2[8] );
}
-void x264_sub8x8_dct8_altivec( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 )
+void x264_sub8x8_dct8_altivec( int16_t dct[64], uint8_t *pix1, uint8_t *pix2 )
{
vec_u16_t onev = vec_splat_u16(1);
vec_u16_t twov = vec_add( onev, onev );
DCT8_1D_ALTIVEC( dct_tr0v, dct_tr1v, dct_tr2v, dct_tr3v,
dct_tr4v, dct_tr5v, dct_tr6v, dct_tr7v );
- vec_st( dct_tr0v, 0, (signed short *)dct );
- vec_st( dct_tr1v, 16, (signed short *)dct );
- vec_st( dct_tr2v, 32, (signed short *)dct );
- vec_st( dct_tr3v, 48, (signed short *)dct );
-
- vec_st( dct_tr4v, 64, (signed short *)dct );
- vec_st( dct_tr5v, 80, (signed short *)dct );
- vec_st( dct_tr6v, 96, (signed short *)dct );
- vec_st( dct_tr7v, 112, (signed short *)dct );
+ vec_st( dct_tr0v, 0, dct );
+ vec_st( dct_tr1v, 16, dct );
+ vec_st( dct_tr2v, 32, dct );
+ vec_st( dct_tr3v, 48, dct );
+
+ vec_st( dct_tr4v, 64, dct );
+ vec_st( dct_tr5v, 80, dct );
+ vec_st( dct_tr6v, 96, dct );
+ vec_st( dct_tr7v, 112, dct );
}
-void x264_sub16x16_dct8_altivec( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 )
+void x264_sub16x16_dct8_altivec( int16_t dct[4][64], uint8_t *pix1, uint8_t *pix2 )
{
x264_sub8x8_dct8_altivec( dct[0], &pix1[0], &pix2[0] );
x264_sub8x8_dct8_altivec( dct[1], &pix1[8], &pix2[8] );
vec_u8_t lv = vec_ld(0, dest); \
vec_u8_t dstv = vec_perm(lv, zero_u8v, (vec_u8_t)perm_ldv); \
vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
- vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
+ vec_u16_t dst16 = vec_u8_to_u16_h(dstv); \
vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
vec_u8_t idstsum8 = vec_s16_to_u8(idstsum); \
/* unaligned store */ \
vec_ste(bodyv, element, (uint32_t *)dest); \
}
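/* Per-pixel semantics shared by this macro and the 8-wide store macro below
 * (reference sketch, not part of the patch): shift the IDCT result down by 6,
 * add the prediction, and saturate to 8 bits. The +32 added to dct[0] before
 * the transform makes the >>6 round to nearest. */
static inline uint8_t idct_add_clip_sketch( uint8_t pred, int16_t coef )
{
    int v = pred + (coef >> 6);           /* vec_sra + vec_adds          */
    return v < 0 ? 0 : v > 255 ? 255 : v; /* saturating pack back to u8  */
}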
-void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[4][4] )
+void x264_add4x4_idct_altivec( uint8_t *dst, int16_t dct[16] )
{
vec_u16_t onev = vec_splat_u16(1);
- dct[0][0] += 32; // rounding for the >>6 at the end
+ dct[0] += 32; // rounding for the >>6 at the end
vec_s16_t s0, s1, s2, s3;
- s0 = vec_ld( 0x00, (int16_t*)dct );
+ s0 = vec_ld( 0x00, dct );
s1 = vec_sld( s0, s0, 8 );
- s2 = vec_ld( 0x10, (int16_t*)dct );
+ s2 = vec_ld( 0x10, dct );
s3 = vec_sld( s2, s2, 8 );
vec_s16_t d0, d1, d2, d3;
ALTIVEC_STORE4_SUM_CLIP( &dst[3*FDEC_STRIDE], idct3, perm_ldv );
}
-void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][4][4] )
+void x264_add8x8_idct_altivec( uint8_t *p_dst, int16_t dct[4][16] )
{
x264_add4x4_idct_altivec( &p_dst[0], dct[0] );
x264_add4x4_idct_altivec( &p_dst[4], dct[1] );
x264_add4x4_idct_altivec( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}
-void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][4][4] )
+void x264_add16x16_idct_altivec( uint8_t *p_dst, int16_t dct[16][16] )
{
x264_add8x8_idct_altivec( &p_dst[0], &dct[0] );
x264_add8x8_idct_altivec( &p_dst[8], &dct[4] );
vec_u8_t lv = vec_ld( 7, dest ); \
vec_u8_t dstv = vec_perm( hv, lv, (vec_u8_t)perm_ldv ); \
vec_s16_t idct_sh6 = vec_sra(idctv, sixv); \
- vec_u16_t dst16 = (vec_u16_t)vec_mergeh(zero_u8v, dstv); \
+ vec_u16_t dst16 = vec_u8_to_u16_h(dstv); \
vec_s16_t idstsum = vec_adds(idct_sh6, (vec_s16_t)dst16); \
vec_u8_t idstsum8 = vec_packsu(zero_s16v, idstsum); \
/* unaligned store */ \
vec_st( hv, 0, dest ); \
}
-void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[8][8] )
+void x264_add8x8_idct8_altivec( uint8_t *dst, int16_t dct[64] )
{
vec_u16_t onev = vec_splat_u16(1);
vec_u16_t twov = vec_splat_u16(2);
- dct[0][0] += 32; // rounding for the >>6 at the end
+ dct[0] += 32; // rounding for the >>6 at the end
vec_s16_t s0, s1, s2, s3, s4, s5, s6, s7;
- s0 = vec_ld(0x00, (int16_t*)dct);
- s1 = vec_ld(0x10, (int16_t*)dct);
- s2 = vec_ld(0x20, (int16_t*)dct);
- s3 = vec_ld(0x30, (int16_t*)dct);
- s4 = vec_ld(0x40, (int16_t*)dct);
- s5 = vec_ld(0x50, (int16_t*)dct);
- s6 = vec_ld(0x60, (int16_t*)dct);
- s7 = vec_ld(0x70, (int16_t*)dct);
+ s0 = vec_ld(0x00, dct);
+ s1 = vec_ld(0x10, dct);
+ s2 = vec_ld(0x20, dct);
+ s3 = vec_ld(0x30, dct);
+ s4 = vec_ld(0x40, dct);
+ s5 = vec_ld(0x50, dct);
+ s6 = vec_ld(0x60, dct);
+ s7 = vec_ld(0x70, dct);
vec_s16_t d0, d1, d2, d3, d4, d5, d6, d7;
IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7);
ALTIVEC_STORE_SUM_CLIP(&dst[7*FDEC_STRIDE], idct7, perm_ldv, perm_stv, sel);
}
-void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][8][8] )
+void x264_add16x16_idct8_altivec( uint8_t *dst, int16_t dct[4][64] )
{
x264_add8x8_idct8_altivec( &dst[0], dct[0] );
x264_add8x8_idct8_altivec( &dst[8], dct[1] );
x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+0], dct[2] );
x264_add8x8_idct8_altivec( &dst[8*FDEC_STRIDE+8], dct[3] );
}
+
+void x264_zigzag_scan_4x4_frame_altivec( int16_t level[16], int16_t dct[16] )
+{
+ vec_s16_t dct0v, dct1v;
+ vec_s16_t tmp0v, tmp1v;
+
+ dct0v = vec_ld(0x00, dct);
+ dct1v = vec_ld(0x10, dct);
+
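+ /* byte-pair selectors into the dct0v|dct1v concatenation: each pair of bytes picks one 16-bit coefficient in zigzag order */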
+ const vec_u8_t sel0 = (vec_u8_t) CV(0,1,8,9,2,3,4,5,10,11,16,17,24,25,18,19);
+ const vec_u8_t sel1 = (vec_u8_t) CV(12,13,6,7,14,15,20,21,26,27,28,29,22,23,30,31);
+
+ tmp0v = vec_perm( dct0v, dct1v, sel0 );
+ tmp1v = vec_perm( dct0v, dct1v, sel1 );
+
+ vec_st( tmp0v, 0x00, level );
+ vec_st( tmp1v, 0x10, level );
+}
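/* The two permutations above realize the standard 4x4 frame zigzag; a scalar
 * reference (annotation, not part of the patch) would read: */
static void zigzag_scan_4x4_frame_ref( int16_t level[16], int16_t dct[16] )
{
    static const uint8_t scan[16] =
        { 0, 4, 1, 2, 5, 8, 12, 9, 6, 3, 7, 10, 13, 14, 11, 15 };
    for( int i = 0; i < 16; i++ )
        level[i] = dct[scan[i]];
}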
+
+void x264_zigzag_scan_4x4_field_altivec( int16_t level[16], int16_t dct[16] )
+{
+ vec_s16_t dct0v, dct1v;
+ vec_s16_t tmp0v, tmp1v;
+
+ dct0v = vec_ld(0x00, dct);
+ dct1v = vec_ld(0x10, dct);
+
+ const vec_u8_t sel0 = (vec_u8_t) CV(0,1,2,3,8,9,4,5,6,7,10,11,12,13,14,15);
+
+ tmp0v = vec_perm( dct0v, dct1v, sel0 );
+ tmp1v = dct1v;
+
+ vec_st( tmp0v, 0x00, level );
+ vec_st( tmp1v, 0x10, level );
+}
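/* The field scan reorders only the first few coefficients, which is why a
 * single selector suffices and dct1v passes through untouched. Scalar
 * reference (annotation, not part of the patch): */
static void zigzag_scan_4x4_field_ref( int16_t level[16], int16_t dct[16] )
{
    static const uint8_t scan[16] =
        { 0, 1, 4, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
    for( int i = 0; i < 16; i++ )
        level[i] = dct[scan[i]];
}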
+#endif // !HIGH_BIT_DEPTH