/*****************************************************************************
- * dct.c: h264 encoder library
+ * dct.c: transform and zigzag
*****************************************************************************
- * Copyright (C) 2003-2008 x264 project
+ * Copyright (C) 2003-2016 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Laurent Aimar <fenrir@via.ecp.fr>
+ * Henrik Gramner <henrik@gramner.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "common.h"
-#ifdef HAVE_MMX
+#if HAVE_MMX
# include "x86/dct.h"
#endif
-#ifdef ARCH_PPC
+#if ARCH_PPC
# include "ppc/dct.h"
#endif
-#ifdef ARCH_ARM
+#if ARCH_ARM
# include "arm/dct.h"
#endif
+#if ARCH_AARCH64
+# include "aarch64/dct.h"
+#endif
+#if ARCH_MIPS
+# include "mips/dct.h"
+#endif
-int x264_dct4_weight2_zigzag[2][16];
-int x264_dct8_weight2_zigzag[2][64];
-
-static void dct4x4dc( int16_t d[16] )
+/* the inverse of the scaling factors introduced by 8x8 fdct */
+/* uint32 is for the asm implementation of trellis. the actual values fit in uint16. */
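+/* FIX8(x) is 8-bit fixed point, i.e. (int)(x*256 + 0.5) (see common.h). */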
+#define W(i) (i==0 ? FIX8(1.0000) :\
+ i==1 ? FIX8(0.8859) :\
+ i==2 ? FIX8(1.6000) :\
+ i==3 ? FIX8(0.9415) :\
+ i==4 ? FIX8(1.2651) :\
+ i==5 ? FIX8(1.1910) :0)
+const uint32_t x264_dct8_weight_tab[64] = {
+ W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+ W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+
+ W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+ W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1)
+};
+#undef W
+
+#define W(i) (i==0 ? FIX8(1.76777) :\
+ i==1 ? FIX8(1.11803) :\
+ i==2 ? FIX8(0.70711) :0)
+const uint32_t x264_dct4_weight_tab[16] = {
+ W(0), W(1), W(0), W(1),
+ W(1), W(2), W(1), W(2),
+ W(0), W(1), W(0), W(1),
+ W(1), W(2), W(1), W(2)
+};
+#undef W
+
+/* the inverse scale factors above, squared (for weighting squared-error/SSD terms) */
+#define W(i) (i==0 ? FIX8(3.125) :\
+ i==1 ? FIX8(1.25) :\
+ i==2 ? FIX8(0.5) :0)
+const uint32_t x264_dct4_weight2_tab[16] = {
+ W(0), W(1), W(0), W(1),
+ W(1), W(2), W(1), W(2),
+ W(0), W(1), W(0), W(1),
+ W(1), W(2), W(1), W(2)
+};
+#undef W
+
+#define W(i) (i==0 ? FIX8(1.00000) :\
+ i==1 ? FIX8(0.78487) :\
+ i==2 ? FIX8(2.56132) :\
+ i==3 ? FIX8(0.88637) :\
+ i==4 ? FIX8(1.60040) :\
+ i==5 ? FIX8(1.41850) :0)
+const uint32_t x264_dct8_weight2_tab[64] = {
+ W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+ W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+
+ W(0), W(3), W(4), W(3), W(0), W(3), W(4), W(3),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1),
+ W(4), W(5), W(2), W(5), W(4), W(5), W(2), W(5),
+ W(3), W(1), W(5), W(1), W(3), W(1), W(5), W(1)
+};
+#undef W
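+
+/* Illustrative sketch only (an assumption about how trellis consumes these
+ * tables; `idx', `coef' and `dequantized_coef' are placeholder names):
+ *
+ *     int d = coef - dequantized_coef;
+ *     uint64_t ssd = (uint64_t)(d * d) * x264_dct4_weight2_tab[idx] >> 8;
+ *
+ * A squared transform-domain error is scaled back toward pixel-domain SSD,
+ * which is why the *_weight2 tables hold the squared factors. */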
+
+
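+/* dctcoef is int16_t in 8-bit builds and int32_t when HIGH_BIT_DEPTH is set
+ * (see common.h). */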
+static void dct4x4dc( dctcoef d[16] )
{
- int16_t tmp[16];
+ dctcoef tmp[16];
for( int i = 0; i < 4; i++ )
{
}
}
-static void idct4x4dc( int16_t d[16] )
+static void idct4x4dc( dctcoef d[16] )
{
- int16_t tmp[16];
+ dctcoef tmp[16];
for( int i = 0; i < 4; i++ )
{
}
}
-static inline void pixel_sub_wxh( int16_t *diff, int i_size,
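+/* 2x4 transform of the DC coefficients of eight 4x4 blocks (the 4:2:2
+ * chroma DC case): the DCs are moved into dct[] and zeroed in the source
+ * blocks. */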
+static void dct2x4dc( dctcoef dct[8], dctcoef dct4x4[8][16] )
+{
+ int a0 = dct4x4[0][0] + dct4x4[1][0];
+ int a1 = dct4x4[2][0] + dct4x4[3][0];
+ int a2 = dct4x4[4][0] + dct4x4[5][0];
+ int a3 = dct4x4[6][0] + dct4x4[7][0];
+ int a4 = dct4x4[0][0] - dct4x4[1][0];
+ int a5 = dct4x4[2][0] - dct4x4[3][0];
+ int a6 = dct4x4[4][0] - dct4x4[5][0];
+ int a7 = dct4x4[6][0] - dct4x4[7][0];
+ int b0 = a0 + a1;
+ int b1 = a2 + a3;
+ int b2 = a4 + a5;
+ int b3 = a6 + a7;
+ int b4 = a0 - a1;
+ int b5 = a2 - a3;
+ int b6 = a4 - a5;
+ int b7 = a6 - a7;
+ dct[0] = b0 + b1;
+ dct[1] = b2 + b3;
+ dct[2] = b0 - b1;
+ dct[3] = b2 - b3;
+ dct[4] = b4 - b5;
+ dct[5] = b6 - b7;
+ dct[6] = b4 + b5;
+ dct[7] = b6 + b7;
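+    /* the DC terms now live in dct[]; zero them so the 4x4 blocks carry AC only */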
+ dct4x4[0][0] = 0;
+ dct4x4[1][0] = 0;
+ dct4x4[2][0] = 0;
+ dct4x4[3][0] = 0;
+ dct4x4[4][0] = 0;
+ dct4x4[5][0] = 0;
+ dct4x4[6][0] = 0;
+ dct4x4[7][0] = 0;
+}
+
+static inline void pixel_sub_wxh( dctcoef *diff, int i_size,
pixel *pix1, int i_pix1, pixel *pix2, int i_pix2 )
{
for( int y = 0; y < i_size; y++ )
}
}
-static void sub4x4_dct( int16_t dct[16], pixel *pix1, pixel *pix2 )
+static void sub4x4_dct( dctcoef dct[16], pixel *pix1, pixel *pix2 )
{
- int16_t d[16];
- int16_t tmp[16];
+ dctcoef d[16];
+ dctcoef tmp[16];
pixel_sub_wxh( d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );
}
}
-static void sub8x8_dct( int16_t dct[4][16], pixel *pix1, pixel *pix2 )
+static void sub8x8_dct( dctcoef dct[4][16], pixel *pix1, pixel *pix2 )
{
sub4x4_dct( dct[0], &pix1[0], &pix2[0] );
sub4x4_dct( dct[1], &pix1[4], &pix2[4] );
sub4x4_dct( dct[3], &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] );
}
-static void sub16x16_dct( int16_t dct[16][16], pixel *pix1, pixel *pix2 )
+static void sub16x16_dct( dctcoef dct[16][16], pixel *pix1, pixel *pix2 )
{
sub8x8_dct( &dct[ 0], &pix1[0], &pix2[0] );
sub8x8_dct( &dct[ 4], &pix1[8], &pix2[8] );
static int sub4x4_dct_dc( pixel *pix1, pixel *pix2 )
{
- int16_t d[16];
int sum = 0;
-
- pixel_sub_wxh( d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );
-
- sum += d[0] + d[1] + d[2] + d[3] + d[4] + d[5] + d[6] + d[7];
- sum += d[8] + d[9] + d[10] + d[11] + d[12] + d[13] + d[14] + d[15];
-
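+    /* only the DC term (the sum of differences) is needed, so skip the
+       transform and sum the pixel differences directly */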
+ for( int i=0; i<4; i++, pix1 += FENC_STRIDE, pix2 += FDEC_STRIDE )
+ sum += pix1[0] + pix1[1] + pix1[2] + pix1[3]
+ - pix2[0] - pix2[1] - pix2[2] - pix2[3];
return sum;
}
-static void sub8x8_dct_dc( int16_t dct[4], pixel *pix1, pixel *pix2 )
+static void sub8x8_dct_dc( dctcoef dct[4], pixel *pix1, pixel *pix2 )
{
dct[0] = sub4x4_dct_dc( &pix1[0], &pix2[0] );
dct[1] = sub4x4_dct_dc( &pix1[4], &pix2[4] );
int d2 = dct[0] - dct[1];
int d3 = dct[2] - dct[3];
dct[0] = d0 + d1;
- dct[2] = d2 + d3;
dct[1] = d0 - d1;
+ dct[2] = d2 + d3;
dct[3] = d2 - d3;
}
-static void add4x4_idct( pixel *p_dst, int16_t dct[16] )
+static void sub8x16_dct_dc( dctcoef dct[8], pixel *pix1, pixel *pix2 )
{
- int16_t d[16];
- int16_t tmp[16];
+ int a0 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+0], &pix2[ 0*FDEC_STRIDE+0] );
+ int a1 = sub4x4_dct_dc( &pix1[ 0*FENC_STRIDE+4], &pix2[ 0*FDEC_STRIDE+4] );
+ int a2 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+0], &pix2[ 4*FDEC_STRIDE+0] );
+ int a3 = sub4x4_dct_dc( &pix1[ 4*FENC_STRIDE+4], &pix2[ 4*FDEC_STRIDE+4] );
+ int a4 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+0], &pix2[ 8*FDEC_STRIDE+0] );
+ int a5 = sub4x4_dct_dc( &pix1[ 8*FENC_STRIDE+4], &pix2[ 8*FDEC_STRIDE+4] );
+ int a6 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+0], &pix2[12*FDEC_STRIDE+0] );
+ int a7 = sub4x4_dct_dc( &pix1[12*FENC_STRIDE+4], &pix2[12*FDEC_STRIDE+4] );
+
+ /* 2x4 DC transform */
+ int b0 = a0 + a1;
+ int b1 = a2 + a3;
+ int b2 = a4 + a5;
+ int b3 = a6 + a7;
+ int b4 = a0 - a1;
+ int b5 = a2 - a3;
+ int b6 = a4 - a5;
+ int b7 = a6 - a7;
+ a0 = b0 + b1;
+ a1 = b2 + b3;
+ a2 = b4 + b5;
+ a3 = b6 + b7;
+ a4 = b0 - b1;
+ a5 = b2 - b3;
+ a6 = b4 - b5;
+ a7 = b6 - b7;
+ dct[0] = a0 + a1;
+ dct[1] = a2 + a3;
+ dct[2] = a0 - a1;
+ dct[3] = a2 - a3;
+ dct[4] = a4 - a5;
+ dct[5] = a6 - a7;
+ dct[6] = a4 + a5;
+ dct[7] = a6 + a7;
+}
+
+static void add4x4_idct( pixel *p_dst, dctcoef dct[16] )
+{
+ dctcoef d[16];
+ dctcoef tmp[16];
for( int i = 0; i < 4; i++ )
{
}
}
-static void add8x8_idct( pixel *p_dst, int16_t dct[4][16] )
+static void add8x8_idct( pixel *p_dst, dctcoef dct[4][16] )
{
add4x4_idct( &p_dst[0], dct[0] );
add4x4_idct( &p_dst[4], dct[1] );
add4x4_idct( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}
-static void add16x16_idct( pixel *p_dst, int16_t dct[16][16] )
+static void add16x16_idct( pixel *p_dst, dctcoef dct[16][16] )
{
add8x8_idct( &p_dst[0], &dct[0] );
add8x8_idct( &p_dst[8], &dct[4] );
DST(7) = (a4>>2) - a7 ;\
}
-static void sub8x8_dct8( int16_t dct[64], pixel *pix1, pixel *pix2 )
+static void sub8x8_dct8( dctcoef dct[64], pixel *pix1, pixel *pix2 )
{
- int16_t tmp[64];
+ dctcoef tmp[64];
pixel_sub_wxh( tmp, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );
#undef DST
}
-static void sub16x16_dct8( int16_t dct[4][64], pixel *pix1, pixel *pix2 )
+static void sub16x16_dct8( dctcoef dct[4][64], pixel *pix1, pixel *pix2 )
{
sub8x8_dct8( dct[0], &pix1[0], &pix2[0] );
sub8x8_dct8( dct[1], &pix1[8], &pix2[8] );
DST(7, b0 - b7);\
}
-static void add8x8_idct8( pixel *dst, int16_t dct[64] )
+static void add8x8_idct8( pixel *dst, dctcoef dct[64] )
{
dct[0] += 32; // rounding for the >>6 at the end
#undef DST
}
-static void add16x16_idct8( pixel *dst, int16_t dct[4][64] )
+static void add16x16_idct8( pixel *dst, dctcoef dct[4][64] )
{
add8x8_idct8( &dst[0], dct[0] );
add8x8_idct8( &dst[8], dct[1] );
add8x8_idct8( &dst[8*FDEC_STRIDE+8], dct[3] );
}
-static void inline add4x4_idct_dc( pixel *p_dst, int16_t dc )
+static inline void add4x4_idct_dc( pixel *p_dst, dctcoef dc )
{
dc = (dc + 32) >> 6;
for( int i = 0; i < 4; i++, p_dst += FDEC_STRIDE )
}
}
-static void add8x8_idct_dc( pixel *p_dst, int16_t dct[4] )
+static void add8x8_idct_dc( pixel *p_dst, dctcoef dct[4] )
{
add4x4_idct_dc( &p_dst[0], dct[0] );
add4x4_idct_dc( &p_dst[4], dct[1] );
add4x4_idct_dc( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}
-static void add16x16_idct_dc( pixel *p_dst, int16_t dct[16] )
+static void add16x16_idct_dc( pixel *p_dst, dctcoef dct[16] )
{
for( int i = 0; i < 4; i++, dct += 4, p_dst += 4*FDEC_STRIDE )
{
dctf->add8x8_idct = add8x8_idct;
dctf->add8x8_idct_dc = add8x8_idct_dc;
+ dctf->sub8x16_dct_dc = sub8x16_dct_dc;
+
dctf->sub16x16_dct = sub16x16_dct;
dctf->add16x16_idct = add16x16_idct;
dctf->add16x16_idct_dc = add16x16_idct_dc;
dctf->dct4x4dc = dct4x4dc;
dctf->idct4x4dc = idct4x4dc;
-#ifdef HAVE_MMX
+ dctf->dct2x4dc = dct2x4dc;
+
+#if HIGH_BIT_DEPTH
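+    /* with HIGH_BIT_DEPTH, dctcoef is 32-bit, so this path has its own SIMD dispatch */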
+#if HAVE_MMX
+ if( cpu&X264_CPU_MMX )
+ {
+ dctf->sub4x4_dct = x264_sub4x4_dct_mmx;
+ dctf->sub8x8_dct = x264_sub8x8_dct_mmx;
+ dctf->sub16x16_dct = x264_sub16x16_dct_mmx;
+ }
+ if( cpu&X264_CPU_SSE2 )
+ {
+ dctf->add4x4_idct = x264_add4x4_idct_sse2;
+ dctf->dct4x4dc = x264_dct4x4dc_sse2;
+ dctf->idct4x4dc = x264_idct4x4dc_sse2;
+ dctf->dct2x4dc = x264_dct2x4dc_sse2;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2;
+ dctf->add8x8_idct = x264_add8x8_idct_sse2;
+ dctf->add16x16_idct = x264_add16x16_idct_sse2;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_sse2;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_sse2;
+ dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_sse2;
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_sse2;
+ dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_sse2;
+ dctf->add16x16_idct_dc= x264_add16x16_idct_dc_sse2;
+ }
+ if( cpu&X264_CPU_SSE4 )
+ {
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse4;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse4;
+ }
+ if( cpu&X264_CPU_AVX )
+ {
+ dctf->add4x4_idct = x264_add4x4_idct_avx;
+ dctf->dct4x4dc = x264_dct4x4dc_avx;
+ dctf->idct4x4dc = x264_idct4x4dc_avx;
+ dctf->dct2x4dc = x264_dct2x4dc_avx;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_avx;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx;
+ dctf->add8x8_idct = x264_add8x8_idct_avx;
+ dctf->add16x16_idct = x264_add16x16_idct_avx;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_avx;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_avx;
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_avx;
+ dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_avx;
+ dctf->add16x16_idct_dc= x264_add16x16_idct_dc_avx;
+ }
+#endif // HAVE_MMX
+#else // !HIGH_BIT_DEPTH
+#if HAVE_MMX
if( cpu&X264_CPU_MMX )
{
dctf->sub4x4_dct = x264_sub4x4_dct_mmx;
dctf->add4x4_idct = x264_add4x4_idct_mmx;
- dctf->add8x8_idct_dc = x264_add8x8_idct_dc_mmx;
- dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx;
- dctf->dct4x4dc = x264_dct4x4dc_mmx;
dctf->idct4x4dc = x264_idct4x4dc_mmx;
- dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmxext;
+ dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmx2;
-#ifndef ARCH_X86_64
+#if !ARCH_X86_64
dctf->sub8x8_dct = x264_sub8x8_dct_mmx;
dctf->sub16x16_dct = x264_sub16x16_dct_mmx;
dctf->add8x8_idct = x264_add8x8_idct_mmx;
#endif
}
+ if( cpu&X264_CPU_MMX2 )
+ {
+ dctf->dct4x4dc = x264_dct4x4dc_mmx2;
+ dctf->dct2x4dc = x264_dct2x4dc_mmx2;
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_mmx2;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx2;
+ }
+
if( cpu&X264_CPU_SSE2 )
{
dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2;
dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2;
dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_sse2;
+ dctf->sub8x16_dct_dc= x264_sub8x16_dct_dc_sse2;
dctf->add8x8_idct8 = x264_add8x8_idct8_sse2;
dctf->add16x16_idct8= x264_add16x16_idct8_sse2;
- dctf->sub8x8_dct = x264_sub8x8_dct_sse2;
- dctf->sub16x16_dct = x264_sub16x16_dct_sse2;
- dctf->add8x8_idct = x264_add8x8_idct_sse2;
- dctf->add16x16_idct = x264_add16x16_idct_sse2;
- dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
+ if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
+ {
+ dctf->sub8x8_dct = x264_sub8x8_dct_sse2;
+ dctf->sub16x16_dct = x264_sub16x16_dct_sse2;
+ dctf->add8x8_idct = x264_add8x8_idct_sse2;
+ dctf->add16x16_idct = x264_add16x16_idct_sse2;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
+ }
}
- if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SLOW_ATOM) )
+ if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
{
- dctf->sub4x4_dct = x264_sub4x4_dct_ssse3;
- dctf->sub8x8_dct = x264_sub8x8_dct_ssse3;
- dctf->sub16x16_dct = x264_sub16x16_dct_ssse3;
- dctf->sub8x8_dct8 = x264_sub8x8_dct8_ssse3;
- dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3;
- dctf->add8x8_idct_dc = x264_add8x8_idct_dc_ssse3;
- dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3;
+ dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_ssse3;
+ if( !(cpu&X264_CPU_SLOW_ATOM) )
+ {
+ dctf->sub4x4_dct = x264_sub4x4_dct_ssse3;
+ dctf->sub8x8_dct = x264_sub8x8_dct_ssse3;
+ dctf->sub16x16_dct = x264_sub16x16_dct_ssse3;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_ssse3;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3;
+ if( !(cpu&X264_CPU_SLOW_PSHUFB) )
+ {
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_ssse3;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3;
+ }
+ }
}
if( cpu&X264_CPU_SSE4 )
dctf->add4x4_idct = x264_add4x4_idct_sse4;
+ if( cpu&X264_CPU_AVX )
+ {
+ dctf->add4x4_idct = x264_add4x4_idct_avx;
+ dctf->add8x8_idct = x264_add8x8_idct_avx;
+ dctf->add16x16_idct = x264_add16x16_idct_avx;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_avx;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_avx;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx;
+ dctf->sub8x8_dct = x264_sub8x8_dct_avx;
+ dctf->sub16x16_dct = x264_sub16x16_dct_avx;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_avx;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx;
+ }
+
+ if( cpu&X264_CPU_XOP )
+ {
+ dctf->sub8x8_dct = x264_sub8x8_dct_xop;
+ dctf->sub16x16_dct = x264_sub16x16_dct_xop;
+ }
+
+ if( cpu&X264_CPU_AVX2 )
+ {
+ dctf->add8x8_idct = x264_add8x8_idct_avx2;
+ dctf->add16x16_idct = x264_add16x16_idct_avx2;
+ dctf->sub8x8_dct = x264_sub8x8_dct_avx2;
+ dctf->sub16x16_dct = x264_sub16x16_dct_avx2;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx2;
+#if ARCH_X86_64
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx2;
+#endif
+ }
#endif //HAVE_MMX
-#ifdef HAVE_ALTIVEC
+#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
{
dctf->sub4x4_dct = x264_sub4x4_dct_altivec;
}
#endif
-#ifdef HAVE_ARMV6
+#if HAVE_ARMV6 || ARCH_AARCH64
if( cpu&X264_CPU_NEON )
{
dctf->sub4x4_dct = x264_sub4x4_dct_neon;
dctf->add8x8_idct8 = x264_add8x8_idct8_neon;
dctf->add16x16_idct8= x264_add16x16_idct8_neon;
+ dctf->sub8x16_dct_dc= x264_sub8x16_dct_dc_neon;
}
#endif
-}
-void x264_dct_init_weights( void )
-{
- for( int j = 0; j < 2; j++ )
+#if HAVE_MSA
+ if( cpu&X264_CPU_MSA )
{
- for( int i = 0; i < 16; i++ )
- x264_dct4_weight2_zigzag[j][i] = x264_dct4_weight2_tab[ x264_zigzag_scan4[j][i] ];
- for( int i = 0; i < 64; i++ )
- x264_dct8_weight2_zigzag[j][i] = x264_dct8_weight2_tab[ x264_zigzag_scan8[j][i] ];
+ dctf->sub4x4_dct = x264_sub4x4_dct_msa;
+ dctf->sub8x8_dct = x264_sub8x8_dct_msa;
+ dctf->sub16x16_dct = x264_sub16x16_dct_msa;
+ dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_msa;
+ dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_msa;
+ dctf->dct4x4dc = x264_dct4x4dc_msa;
+ dctf->idct4x4dc = x264_idct4x4dc_msa;
+ dctf->add4x4_idct = x264_add4x4_idct_msa;
+ dctf->add8x8_idct = x264_add8x8_idct_msa;
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_msa;
+ dctf->add16x16_idct = x264_add16x16_idct_msa;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_msa;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_msa;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_msa;
}
+#endif
+
+#endif // HIGH_BIT_DEPTH
}
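+
+/* Illustrative usage (a sketch; cpu_flags, dct4x4, p_fenc and p_fdec are
+ * placeholder names): the encoder fills the table once at init and then
+ * dispatches through it:
+ *
+ *     x264_dct_function_t dctf;
+ *     x264_dct_init( cpu_flags, &dctf );
+ *     dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
+ */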
ZIG( 8,0,2) ZIG( 9,1,2) ZIG(10,2,2) ZIG(11,3,2)\
ZIG(12,0,3) ZIG(13,1,3) ZIG(14,2,3) ZIG(15,3,3)
-static void zigzag_scan_8x8_frame( int16_t level[64], int16_t dct[64] )
+static void zigzag_scan_8x8_frame( dctcoef level[64], dctcoef dct[64] )
{
ZIGZAG8_FRAME
}
-static void zigzag_scan_8x8_field( int16_t level[64], int16_t dct[64] )
+static void zigzag_scan_8x8_field( dctcoef level[64], dctcoef dct[64] )
{
ZIGZAG8_FIELD
}
#define ZIG(i,y,x) level[i] = dct[x*4+y];
#define ZIGDC(i,y,x) ZIG(i,y,x)
-static void zigzag_scan_4x4_frame( int16_t level[16], int16_t dct[16] )
+static void zigzag_scan_4x4_frame( dctcoef level[16], dctcoef dct[16] )
{
ZIGZAG4_FRAME
}
-static void zigzag_scan_4x4_field( int16_t level[16], int16_t dct[16] )
+static void zigzag_scan_4x4_field( dctcoef level[16], dctcoef dct[16] )
{
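+    /* elements 0-1 and 6-15 of the field scan are in raster order;
+       only 2-5 need explicit scanning */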
- CP32( level, dct );
+ memcpy( level, dct, 2 * sizeof(dctcoef) );
ZIG(2,0,1) ZIG(3,2,0) ZIG(4,3,0) ZIG(5,1,1)
- CP32( level+6, dct+6 );
- CP64( level+8, dct+8 );
- CP64( level+12, dct+12 );
+ memcpy( level+6, dct+6, 10 * sizeof(dctcoef) );
}
#undef ZIG
CPPIXEL_X4( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\
CPPIXEL_X4( p_dst+2*FDEC_STRIDE, p_src+2*FENC_STRIDE );\
CPPIXEL_X4( p_dst+3*FDEC_STRIDE, p_src+3*FENC_STRIDE );
+#define CPPIXEL_X8(dst,src) ( CPPIXEL_X4(dst,src), CPPIXEL_X4(dst+4,src+4) )
#define COPY8x8\
CPPIXEL_X8( p_dst+0*FDEC_STRIDE, p_src+0*FENC_STRIDE );\
CPPIXEL_X8( p_dst+1*FDEC_STRIDE, p_src+1*FENC_STRIDE );\
CPPIXEL_X8( p_dst+6*FDEC_STRIDE, p_src+6*FENC_STRIDE );\
CPPIXEL_X8( p_dst+7*FDEC_STRIDE, p_src+7*FENC_STRIDE );
-static int zigzag_sub_4x4_frame( int16_t level[16], const pixel *p_src, pixel *p_dst )
+static int zigzag_sub_4x4_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst )
{
int nz = 0;
ZIGZAG4_FRAME
return !!nz;
}
-static int zigzag_sub_4x4_field( int16_t level[16], const pixel *p_src, pixel *p_dst )
+static int zigzag_sub_4x4_field( dctcoef level[16], const pixel *p_src, pixel *p_dst )
{
int nz = 0;
ZIGZAG4_FIELD
level[0] = 0;\
}
-static int zigzag_sub_4x4ac_frame( int16_t level[16], const pixel *p_src, pixel *p_dst, int16_t *dc )
+static int zigzag_sub_4x4ac_frame( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc )
{
int nz = 0;
ZIGZAG4_FRAME
return !!nz;
}
-static int zigzag_sub_4x4ac_field( int16_t level[16], const pixel *p_src, pixel *p_dst, int16_t *dc )
+static int zigzag_sub_4x4ac_field( dctcoef level[16], const pixel *p_src, pixel *p_dst, dctcoef *dc )
{
int nz = 0;
ZIGZAG4_FIELD
return !!nz;
}
-static int zigzag_sub_8x8_frame( int16_t level[64], const pixel *p_src, pixel *p_dst )
+static int zigzag_sub_8x8_frame( dctcoef level[64], const pixel *p_src, pixel *p_dst )
{
int nz = 0;
ZIGZAG8_FRAME
COPY8x8
return !!nz;
}
-static int zigzag_sub_8x8_field( int16_t level[64], const pixel *p_src, pixel *p_dst )
+static int zigzag_sub_8x8_field( dctcoef level[64], const pixel *p_src, pixel *p_dst )
{
int nz = 0;
ZIGZAG8_FIELD
#undef ZIG
#undef COPY4x4
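+/* CAVLC has no 8x8 coefficient syntax, so an 8x8 block's 64 coefficients are
+ * interleaved among four 4x4 blocks, with a nonzero flag produced per 4x4
+ * block. */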
-static void zigzag_interleave_8x8_cavlc( int16_t *dst, int16_t *src, uint8_t *nnz )
+static void zigzag_interleave_8x8_cavlc( dctcoef *dst, dctcoef *src, uint8_t *nnz )
{
for( int i = 0; i < 4; i++ )
{
}
}
-void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced )
+void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf_progressive, x264_zigzag_function_t *pf_interlaced )
{
- if( b_interlaced )
+ pf_interlaced->scan_8x8 = zigzag_scan_8x8_field;
+ pf_progressive->scan_8x8 = zigzag_scan_8x8_frame;
+ pf_interlaced->scan_4x4 = zigzag_scan_4x4_field;
+ pf_progressive->scan_4x4 = zigzag_scan_4x4_frame;
+ pf_interlaced->sub_8x8 = zigzag_sub_8x8_field;
+ pf_progressive->sub_8x8 = zigzag_sub_8x8_frame;
+ pf_interlaced->sub_4x4 = zigzag_sub_4x4_field;
+ pf_progressive->sub_4x4 = zigzag_sub_4x4_frame;
+ pf_interlaced->sub_4x4ac = zigzag_sub_4x4ac_field;
+ pf_progressive->sub_4x4ac = zigzag_sub_4x4ac_frame;
+
+#if HIGH_BIT_DEPTH
+#if HAVE_MMX
+ if( cpu&X264_CPU_SSE2 )
{
- pf->scan_8x8 = zigzag_scan_8x8_field;
- pf->scan_4x4 = zigzag_scan_4x4_field;
- pf->sub_8x8 = zigzag_sub_8x8_field;
- pf->sub_4x4 = zigzag_sub_4x4_field;
- pf->sub_4x4ac = zigzag_sub_4x4ac_field;
-#ifdef HAVE_MMX
- if( cpu&X264_CPU_MMXEXT )
- {
- pf->scan_4x4 = x264_zigzag_scan_4x4_field_mmxext;
- pf->scan_8x8 = x264_zigzag_scan_8x8_field_mmxext;
- }
- if( cpu&X264_CPU_SSSE3 )
- {
- pf->sub_4x4 = x264_zigzag_sub_4x4_field_ssse3;
- pf->sub_4x4ac= x264_zigzag_sub_4x4ac_field_ssse3;
- }
-#endif
-
-#ifdef HAVE_ALTIVEC
- if( cpu&X264_CPU_ALTIVEC )
- pf->scan_4x4 = x264_zigzag_scan_4x4_field_altivec;
-#endif
+ pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_sse2;
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_sse2;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
}
- else
- {
- pf->scan_8x8 = zigzag_scan_8x8_frame;
- pf->scan_4x4 = zigzag_scan_4x4_frame;
- pf->sub_8x8 = zigzag_sub_8x8_frame;
- pf->sub_4x4 = zigzag_sub_4x4_frame;
- pf->sub_4x4ac = zigzag_sub_4x4ac_frame;
-#ifdef HAVE_MMX
- if( cpu&X264_CPU_MMX )
- pf->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx;
- if( cpu&X264_CPU_MMXEXT )
- pf->scan_8x8 = x264_zigzag_scan_8x8_frame_mmxext;
- if( cpu&X264_CPU_SSE2_IS_FAST )
- pf->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
- if( cpu&X264_CPU_SSSE3 )
- {
- pf->sub_4x4 = x264_zigzag_sub_4x4_frame_ssse3;
- pf->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_ssse3;
- pf->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3;
- if( cpu&X264_CPU_SHUFFLE_IS_FAST )
- pf->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3;
- }
-#endif
-
-#ifdef HAVE_ALTIVEC
- if( cpu&X264_CPU_ALTIVEC )
- pf->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec;
+ if( cpu&X264_CPU_SSE4 )
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_sse4;
+ if( cpu&X264_CPU_AVX )
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_avx;
+#if ARCH_X86_64
+ if( cpu&X264_CPU_AVX )
+ {
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_avx;
+ }
+#endif // ARCH_X86_64
+#endif // HAVE_MMX
+#else
+#if HAVE_MMX
+ if( cpu&X264_CPU_MMX )
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx;
+ if( cpu&X264_CPU_MMX2 )
+ {
+ pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_mmx2;
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_mmx2;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_mmx2;
+ }
+ if( cpu&X264_CPU_SSE2_IS_FAST )
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2;
+ if( cpu&X264_CPU_SSSE3 )
+ {
+ pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_ssse3;
+ pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_ssse3;
+ pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_ssse3;
+ pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_ssse3;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3;
+ if( !(cpu&X264_CPU_SLOW_SHUFFLE) )
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3;
+ }
+ if( cpu&X264_CPU_AVX )
+ {
+ pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_avx;
+ pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_avx;
+#if ARCH_X86_64
+ pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_avx;
+ pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_avx;
#endif
-#ifdef HAVE_ARMV6
- if( cpu&X264_CPU_NEON )
- pf->scan_4x4 = x264_zigzag_scan_4x4_frame_neon;
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
+ }
+ if( cpu&X264_CPU_XOP )
+ {
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_xop;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_xop;
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_xop;
+ }
+#endif // HAVE_MMX
+#if HAVE_ALTIVEC
+ if( cpu&X264_CPU_ALTIVEC )
+ {
+ pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_altivec;
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec;
+ }
#endif
+#if HAVE_ARMV6 || ARCH_AARCH64
+ if( cpu&X264_CPU_NEON )
+ {
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_neon;
+#if ARCH_AARCH64
+ pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_neon;
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_neon;
+ pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_neon;
+ pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_neon;
+ pf_interlaced->sub_8x8 = x264_zigzag_sub_8x8_field_neon;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_neon;
+ pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_neon;
+ pf_progressive->sub_4x4ac = x264_zigzag_sub_4x4ac_frame_neon;
+ pf_progressive->sub_8x8 = x264_zigzag_sub_8x8_frame_neon;
+#endif // ARCH_AARCH64
}
+#endif // HAVE_ARMV6 || ARCH_AARCH64
+#endif // HIGH_BIT_DEPTH
- pf->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc;
-#ifdef HAVE_MMX
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc;
+#if HAVE_MMX
+#if HIGH_BIT_DEPTH
+ if( cpu&X264_CPU_SSE2 )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
+ }
+ if( cpu&X264_CPU_AVX )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
+ }
+#else
if( cpu&X264_CPU_MMX )
- pf->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx;
- if( cpu&X264_CPU_SHUFFLE_IS_FAST )
- pf->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx;
+ }
+ if( (cpu&X264_CPU_SSE2) && !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SSE2_IS_SLOW)) )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
+ }
+
+ if( cpu&X264_CPU_AVX )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
+ }
+
+ if( cpu&X264_CPU_AVX2 )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx2;
+ }
+#endif // HIGH_BIT_DEPTH
+#endif
+#if !HIGH_BIT_DEPTH
+#if ARCH_AARCH64
+ if( cpu&X264_CPU_NEON )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_neon;
+ }
+#endif // ARCH_AARCH64
+#if HAVE_MSA
+ if( cpu&X264_CPU_MSA )
+ {
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_msa;
+ }
+#endif
#endif
}