/*****************************************************************************
* dct.c: transform and zigzag
*****************************************************************************
- * Copyright (C) 2003-2011 x264 project
+ * Copyright (C) 2003-2016 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Laurent Aimar <fenrir@via.ecp.fr>
- * Henrik Gramner <hengar-6@student.ltu.se>
+ * Henrik Gramner <henrik@gramner.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#if ARCH_ARM
# include "arm/dct.h"
#endif
+#if ARCH_AARCH64
+# include "aarch64/dct.h"
+#endif
+#if ARCH_MIPS
+# include "mips/dct.h"
+#endif
+
+/* the inverse of the scaling factors introduced by 8x8 fdct */
+/* uint32 is for the asm implementation of trellis. the actual values fit in uint16. */
+/* W(0)..W(5) are the six distinct weights; the 8x8 pattern below repeats with
+ * period 4 in both dimensions (rows 4-7 duplicate rows 0-3, and each row's
+ * second half duplicates its first half). */
+#define W(i) (i==0 ? FIX8(1.0000) :\
+              i==1 ? FIX8(0.8859) :\
+              i==2 ? FIX8(1.6000) :\
+              i==3 ? FIX8(0.9415) :\
+              i==4 ? FIX8(1.2651) :\
+              i==5 ? FIX8(1.1910) :0)
+const uint32_t x264_dct8_weight_tab[64] = {
+    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
+    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
+
+    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
+    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1)
+};
+#undef W
+
+/* 4x4 analogue of the table above: the three distinct weights here are the
+ * square roots of the x264_dct4_weight2_tab entries below
+ * (1.76777^2 = 3.125, 1.11803^2 = 1.25, 0.70711^2 = 0.5). */
+#define W(i) (i==0 ? FIX8(1.76777) :\
+              i==1 ? FIX8(1.11803) :\
+              i==2 ? FIX8(0.70711) :0)
+const uint32_t x264_dct4_weight_tab[16] = {
+    W(0), W(1), W(0), W(1),
+    W(1), W(2), W(1), W(2),
+    W(0), W(1), W(0), W(1),
+    W(1), W(2), W(1), W(2)
+};
+#undef W
+
+/* inverse squared: each entry is the square of the corresponding
+ * x264_dct4_weight_tab entry (3.125 = 1.76777^2, etc.), at FIX8 precision */
+#define W(i) (i==0 ? FIX8(3.125) :\
+              i==1 ? FIX8(1.25) :\
+              i==2 ? FIX8(0.5) :0)
+const uint32_t x264_dct4_weight2_tab[16] = {
+    W(0), W(1), W(0), W(1),
+    W(1), W(2), W(1), W(2),
+    W(0), W(1), W(0), W(1),
+    W(1), W(2), W(1), W(2)
+};
+#undef W
+
+/* inverse squared: 8x8 counterpart of the table above — each weight matches
+ * the corresponding x264_dct8_weight_tab entry squared, to FIX8 rounding
+ * (e.g. 2.56132 ~= 1.6000^2, 0.78487 ~= 0.8859^2); same period-4 layout. */
+#define W(i) (i==0 ? FIX8(1.00000) :\
+              i==1 ? FIX8(0.78487) :\
+              i==2 ? FIX8(2.56132) :\
+              i==3 ? FIX8(0.88637) :\
+              i==4 ? FIX8(1.60040) :\
+              i==5 ? FIX8(1.41850) :0)
+const uint32_t x264_dct8_weight2_tab[64] = {
+    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
+    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
+
+    W(0), W(3), W(4), W(3),  W(0), W(3), W(4), W(3),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1),
+    W(4), W(5), W(2), W(5),  W(4), W(5), W(2), W(5),
+    W(3), W(1), W(5), W(1),  W(3), W(1), W(5), W(1)
+};
+#undef W
-uint16_t x264_dct4_weight2_zigzag[2][16];
-uint16_t x264_dct8_weight2_zigzag[2][64];
static void dct4x4dc( dctcoef d[16] )
{
dctf->add4x4_idct = x264_add4x4_idct_sse2;
dctf->dct4x4dc = x264_dct4x4dc_sse2;
dctf->idct4x4dc = x264_idct4x4dc_sse2;
+ dctf->dct2x4dc = x264_dct2x4dc_sse2;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2;
dctf->add8x8_idct = x264_add8x8_idct_sse2;
dctf->add16x16_idct = x264_add16x16_idct_sse2;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_sse2;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_sse2;
dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_sse2;
dctf->add8x8_idct_dc = x264_add8x8_idct_dc_sse2;
dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_sse2;
dctf->add16x16_idct_dc= x264_add16x16_idct_dc_sse2;
}
+ if( cpu&X264_CPU_SSE4 )
+ {
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse4;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse4;
+ }
if( cpu&X264_CPU_AVX )
{
dctf->add4x4_idct = x264_add4x4_idct_avx;
dctf->dct4x4dc = x264_dct4x4dc_avx;
dctf->idct4x4dc = x264_idct4x4dc_avx;
+ dctf->dct2x4dc = x264_dct2x4dc_avx;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_avx;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx;
dctf->add8x8_idct = x264_add8x8_idct_avx;
dctf->add16x16_idct = x264_add16x16_idct_avx;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_avx;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_avx;
dctf->add8x8_idct_dc = x264_add8x8_idct_dc_avx;
dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_avx;
dctf->add16x16_idct_dc= x264_add16x16_idct_dc_avx;
{
dctf->sub4x4_dct = x264_sub4x4_dct_mmx;
dctf->add4x4_idct = x264_add4x4_idct_mmx;
- dctf->add8x8_idct_dc = x264_add8x8_idct_dc_mmx;
- dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx;
- dctf->dct4x4dc = x264_dct4x4dc_mmx;
dctf->idct4x4dc = x264_idct4x4dc_mmx;
dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_mmx2;
#endif
}
+ if( cpu&X264_CPU_MMX2 )
+ {
+ dctf->dct4x4dc = x264_dct4x4dc_mmx2;
+ dctf->dct2x4dc = x264_dct2x4dc_mmx2;
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_mmx2;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_mmx2;
+ }
+
if( cpu&X264_CPU_SSE2 )
{
dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2;
dctf->add8x8_idct8 = x264_add8x8_idct8_sse2;
dctf->add16x16_idct8= x264_add16x16_idct8_sse2;
- dctf->sub8x8_dct = x264_sub8x8_dct_sse2;
- dctf->sub16x16_dct = x264_sub16x16_dct_sse2;
- dctf->add8x8_idct = x264_add8x8_idct_sse2;
- dctf->add16x16_idct = x264_add16x16_idct_sse2;
- dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
+ if( !(cpu&X264_CPU_SSE2_IS_SLOW) )
+ {
+ dctf->sub8x8_dct = x264_sub8x8_dct_sse2;
+ dctf->sub16x16_dct = x264_sub16x16_dct_sse2;
+ dctf->add8x8_idct = x264_add8x8_idct_sse2;
+ dctf->add16x16_idct = x264_add16x16_idct_sse2;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_sse2;
+ }
}
- if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SLOW_ATOM) )
+ if( (cpu&X264_CPU_SSSE3) && !(cpu&X264_CPU_SSE2_IS_SLOW) )
{
- dctf->sub4x4_dct = x264_sub4x4_dct_ssse3;
- dctf->sub8x8_dct = x264_sub8x8_dct_ssse3;
- dctf->sub16x16_dct = x264_sub16x16_dct_ssse3;
- dctf->sub8x8_dct8 = x264_sub8x8_dct8_ssse3;
- dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3;
dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_ssse3;
- dctf->add8x8_idct_dc = x264_add8x8_idct_dc_ssse3;
- dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3;
+ if( !(cpu&X264_CPU_SLOW_ATOM) )
+ {
+ dctf->sub4x4_dct = x264_sub4x4_dct_ssse3;
+ dctf->sub8x8_dct = x264_sub8x8_dct_ssse3;
+ dctf->sub16x16_dct = x264_sub16x16_dct_ssse3;
+ dctf->sub8x8_dct8 = x264_sub8x8_dct8_ssse3;
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_ssse3;
+ if( !(cpu&X264_CPU_SLOW_PSHUFB) )
+ {
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_ssse3;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_ssse3;
+ }
+ }
}
if( cpu&X264_CPU_SSE4 )
dctf->sub8x8_dct = x264_sub8x8_dct_xop;
dctf->sub16x16_dct = x264_sub16x16_dct_xop;
}
+
+ if( cpu&X264_CPU_AVX2 )
+ {
+ dctf->add8x8_idct = x264_add8x8_idct_avx2;
+ dctf->add16x16_idct = x264_add16x16_idct_avx2;
+ dctf->sub8x8_dct = x264_sub8x8_dct_avx2;
+ dctf->sub16x16_dct = x264_sub16x16_dct_avx2;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_avx2;
+#if ARCH_X86_64
+ dctf->sub16x16_dct8 = x264_sub16x16_dct8_avx2;
+#endif
+ }
#endif //HAVE_MMX
#if HAVE_ALTIVEC
}
#endif
-#if HAVE_ARMV6
+#if HAVE_ARMV6 || ARCH_AARCH64
if( cpu&X264_CPU_NEON )
{
dctf->sub4x4_dct = x264_sub4x4_dct_neon;
dctf->add8x8_idct8 = x264_add8x8_idct8_neon;
dctf->add16x16_idct8= x264_add16x16_idct8_neon;
+ dctf->sub8x16_dct_dc= x264_sub8x16_dct_dc_neon;
}
#endif
-#endif // HIGH_BIT_DEPTH
-}
-void x264_dct_init_weights( void )
-{
- for( int j = 0; j < 2; j++ )
+#if HAVE_MSA
+ if( cpu&X264_CPU_MSA )
{
- for( int i = 0; i < 16; i++ )
- x264_dct4_weight2_zigzag[j][i] = x264_dct4_weight2_tab[ x264_zigzag_scan4[j][i] ];
- for( int i = 0; i < 64; i++ )
- x264_dct8_weight2_zigzag[j][i] = x264_dct8_weight2_tab[ x264_zigzag_scan8[j][i] ];
+ dctf->sub4x4_dct = x264_sub4x4_dct_msa;
+ dctf->sub8x8_dct = x264_sub8x8_dct_msa;
+ dctf->sub16x16_dct = x264_sub16x16_dct_msa;
+ dctf->sub8x8_dct_dc = x264_sub8x8_dct_dc_msa;
+ dctf->sub8x16_dct_dc = x264_sub8x16_dct_dc_msa;
+ dctf->dct4x4dc = x264_dct4x4dc_msa;
+ dctf->idct4x4dc = x264_idct4x4dc_msa;
+ dctf->add4x4_idct = x264_add4x4_idct_msa;
+ dctf->add8x8_idct = x264_add8x8_idct_msa;
+ dctf->add8x8_idct_dc = x264_add8x8_idct_dc_msa;
+ dctf->add16x16_idct = x264_add16x16_idct_msa;
+ dctf->add16x16_idct_dc = x264_add16x16_idct_dc_msa;
+ dctf->add8x8_idct8 = x264_add8x8_idct8_msa;
+ dctf->add16x16_idct8 = x264_add16x16_idct8_msa;
}
+#endif
+
+#endif // HIGH_BIT_DEPTH
}
pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_ssse3;
pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_ssse3;
pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3;
- if( cpu&X264_CPU_SHUFFLE_IS_FAST )
+ if( !(cpu&X264_CPU_SLOW_SHUFFLE) )
pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3;
}
if( cpu&X264_CPU_AVX )
pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_avx;
pf_progressive->sub_4x4ac= x264_zigzag_sub_4x4ac_frame_avx;
#endif
- if( cpu&X264_CPU_SHUFFLE_IS_FAST )
- pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_avx;
}
if( cpu&X264_CPU_XOP )
+ {
pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_xop;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_xop;
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_xop;
+ }
#endif // HAVE_MMX
#if HAVE_ALTIVEC
if( cpu&X264_CPU_ALTIVEC )
pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec;
}
#endif
-#if HAVE_ARMV6
+#if HAVE_ARMV6 || ARCH_AARCH64
if( cpu&X264_CPU_NEON )
- pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_neon;
-#endif
+ {
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_neon;
+#if ARCH_AARCH64
+ pf_interlaced->scan_4x4 = x264_zigzag_scan_4x4_field_neon;
+ pf_interlaced->scan_8x8 = x264_zigzag_scan_8x8_field_neon;
+ pf_interlaced->sub_4x4 = x264_zigzag_sub_4x4_field_neon;
+ pf_interlaced->sub_4x4ac = x264_zigzag_sub_4x4ac_field_neon;
+ pf_interlaced->sub_8x8 = x264_zigzag_sub_8x8_field_neon;
+ pf_progressive->scan_8x8 = x264_zigzag_scan_8x8_frame_neon;
+ pf_progressive->sub_4x4 = x264_zigzag_sub_4x4_frame_neon;
+ pf_progressive->sub_4x4ac = x264_zigzag_sub_4x4ac_frame_neon;
+ pf_progressive->sub_8x8 = x264_zigzag_sub_8x8_frame_neon;
+#endif // ARCH_AARCH64
+ }
+#endif // HAVE_ARMV6 || ARCH_AARCH64
#endif // HIGH_BIT_DEPTH
pf_interlaced->interleave_8x8_cavlc =
pf_interlaced->interleave_8x8_cavlc =
pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx;
}
- if( cpu&X264_CPU_SHUFFLE_IS_FAST )
+ if( (cpu&X264_CPU_SSE2) && !(cpu&(X264_CPU_SLOW_SHUFFLE|X264_CPU_SSE2_IS_SLOW)) )
{
pf_interlaced->interleave_8x8_cavlc =
pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_sse2;
pf_interlaced->interleave_8x8_cavlc =
pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx;
}
+
+ if( cpu&X264_CPU_AVX2 )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_avx2;
+ }
#endif // HIGH_BIT_DEPTH
#endif
+#if !HIGH_BIT_DEPTH
+#if ARCH_AARCH64
+ if( cpu&X264_CPU_NEON )
+ {
+ pf_interlaced->interleave_8x8_cavlc =
+ pf_progressive->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_neon;
+ }
+#endif // ARCH_AARCH64
+#endif // !HIGH_BIT_DEPTH
+#if !HIGH_BIT_DEPTH
+#if HAVE_MSA
+ if( cpu&X264_CPU_MSA )
+ {
+ pf_progressive->scan_4x4 = x264_zigzag_scan_4x4_frame_msa;
+ }
+#endif
+#endif
}