/*****************************************************************************
- * quant.c: h264 encoder library
+ * quant.c: quantization and level-run
*****************************************************************************
- * Copyright (C) 2005 x264 project
+ * Copyright (C) 2005-2016 x264 project
*
- * Authors: Christian Heine <sennindemokrit@gmx.net>
+ * Authors: Loren Merritt <lorenm@u.washington.edu>
+ * Fiona Glaser <fiona@x264.com>
+ * Christian Heine <sennindemokrit@gmx.net>
+ * Henrik Gramner <henrik@gramner.com>
*
* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
#include "common.h"
-#ifdef HAVE_MMXEXT
-#include "i386/quant.h"
+#if HAVE_MMX
+#include "x86/quant.h"
+#endif
+#if ARCH_PPC
+# include "ppc/quant.h"
+#endif
+#if ARCH_ARM
+# include "arm/quant.h"
+#endif
+#if ARCH_AARCH64
+# include "aarch64/quant.h"
+#endif
+#if ARCH_MIPS
+# include "mips/quant.h"
#endif
-#define QUANT_ONE( coef, mf ) \
+#define QUANT_ONE( coef, mf, f ) \
{ \
if( (coef) > 0 ) \
- (coef) = ( f + (coef) * (mf) ) >> i_qbits; \
+ (coef) = (f + (coef)) * (mf) >> 16; \
else \
- (coef) = - ( ( f - (coef) * (mf) ) >> i_qbits ); \
+ (coef) = - ((f - (coef)) * (mf) >> 16); \
+ nz |= (coef); \
}
-static void quant_8x8_core( int16_t dct[8][8], int quant_mf[8][8], int i_qbits, int f )
+static int quant_8x8( dctcoef dct[64], udctcoef mf[64], udctcoef bias[64] )
{
- int i;
- for( i = 0; i < 64; i++ )
- QUANT_ONE( dct[0][i], quant_mf[0][i] );
+ int nz = 0;
+ for( int i = 0; i < 64; i++ )
+ QUANT_ONE( dct[i], mf[i], bias[i] );
+ return !!nz;
}
-static void quant_4x4_core( int16_t dct[4][4], int quant_mf[4][4], int i_qbits, int f )
+static int quant_4x4( dctcoef dct[16], udctcoef mf[16], udctcoef bias[16] )
{
- int i;
- for( i = 0; i < 16; i++ )
- QUANT_ONE( dct[0][i], quant_mf[0][i] );
+ int nz = 0;
+ for( int i = 0; i < 16; i++ )
+ QUANT_ONE( dct[i], mf[i], bias[i] );
+ return !!nz;
}
-static void quant_4x4_dc_core( int16_t dct[4][4], int i_quant_mf, int i_qbits, int f )
+static int quant_4x4x4( dctcoef dct[4][16], udctcoef mf[16], udctcoef bias[16] )
{
- int i;
- for( i = 0; i < 16; i++ )
- QUANT_ONE( dct[0][i], i_quant_mf );
+ int nza = 0;
+ for( int j = 0; j < 4; j++ )
+ {
+ int nz = 0;
+ for( int i = 0; i < 16; i++ )
+ QUANT_ONE( dct[j][i], mf[i], bias[i] );
+ nza |= (!!nz)<<j;
+ }
+ return nza;
}
-static void quant_2x2_dc_core( int16_t dct[2][2], int i_quant_mf, int i_qbits, int f )
+static int quant_4x4_dc( dctcoef dct[16], int mf, int bias )
{
- QUANT_ONE( dct[0][0], i_quant_mf );
- QUANT_ONE( dct[0][1], i_quant_mf );
- QUANT_ONE( dct[0][2], i_quant_mf );
- QUANT_ONE( dct[0][3], i_quant_mf );
+ int nz = 0;
+ for( int i = 0; i < 16; i++ )
+ QUANT_ONE( dct[i], mf, bias );
+ return !!nz;
+}
+
+static int quant_2x2_dc( dctcoef dct[4], int mf, int bias )
+{
+ int nz = 0;
+ QUANT_ONE( dct[0], mf, bias );
+ QUANT_ONE( dct[1], mf, bias );
+ QUANT_ONE( dct[2], mf, bias );
+ QUANT_ONE( dct[3], mf, bias );
+ return !!nz;
}
#define DEQUANT_SHL( x ) \
- dct[y][x] = ( dct[y][x] * dequant_mf[i_mf][y][x] ) << i_qbits
+ dct[x] = ( dct[x] * dequant_mf[i_mf][x] ) << i_qbits
#define DEQUANT_SHR( x ) \
- dct[y][x] = ( dct[y][x] * dequant_mf[i_mf][y][x] + f ) >> (-i_qbits)
+ dct[x] = ( dct[x] * dequant_mf[i_mf][x] + f ) >> (-i_qbits)
-static void dequant_4x4( int16_t dct[4][4], int dequant_mf[6][4][4], int i_qp )
+static void dequant_4x4( dctcoef dct[16], int dequant_mf[6][16], int i_qp )
{
const int i_mf = i_qp%6;
const int i_qbits = i_qp/6 - 4;
- int y;
if( i_qbits >= 0 )
{
- for( y = 0; y < 4; y++ )
- {
- DEQUANT_SHL( 0 );
- DEQUANT_SHL( 1 );
- DEQUANT_SHL( 2 );
- DEQUANT_SHL( 3 );
- }
+ for( int i = 0; i < 16; i++ )
+ DEQUANT_SHL( i );
}
else
{
const int f = 1 << (-i_qbits-1);
- for( y = 0; y < 4; y++ )
- {
- DEQUANT_SHR( 0 );
- DEQUANT_SHR( 1 );
- DEQUANT_SHR( 2 );
- DEQUANT_SHR( 3 );
- }
+ for( int i = 0; i < 16; i++ )
+ DEQUANT_SHR( i );
}
}
-static void dequant_8x8( int16_t dct[8][8], int dequant_mf[6][8][8], int i_qp )
+static void dequant_8x8( dctcoef dct[64], int dequant_mf[6][64], int i_qp )
{
const int i_mf = i_qp%6;
const int i_qbits = i_qp/6 - 6;
- int y;
if( i_qbits >= 0 )
{
- for( y = 0; y < 8; y++ )
- {
- DEQUANT_SHL( 0 );
- DEQUANT_SHL( 1 );
- DEQUANT_SHL( 2 );
- DEQUANT_SHL( 3 );
- DEQUANT_SHL( 4 );
- DEQUANT_SHL( 5 );
- DEQUANT_SHL( 6 );
- DEQUANT_SHL( 7 );
- }
+ for( int i = 0; i < 64; i++ )
+ DEQUANT_SHL( i );
}
else
{
const int f = 1 << (-i_qbits-1);
- for( y = 0; y < 8; y++ )
- {
- DEQUANT_SHR( 0 );
- DEQUANT_SHR( 1 );
- DEQUANT_SHR( 2 );
- DEQUANT_SHR( 3 );
- DEQUANT_SHR( 4 );
- DEQUANT_SHR( 5 );
- DEQUANT_SHR( 6 );
- DEQUANT_SHR( 7 );
- }
+ for( int i = 0; i < 64; i++ )
+ DEQUANT_SHR( i );
}
}
-void x264_mb_dequant_2x2_dc( int16_t dct[2][2], int dequant_mf[6][4][4], int i_qp )
+static void dequant_4x4_dc( dctcoef dct[16], int dequant_mf[6][16], int i_qp )
{
- const int i_qbits = i_qp/6 - 5;
+ const int i_qbits = i_qp/6 - 6;
if( i_qbits >= 0 )
{
- const int i_dmf = dequant_mf[i_qp%6][0][0] << i_qbits;
- dct[0][0] *= i_dmf;
- dct[0][1] *= i_dmf;
- dct[1][0] *= i_dmf;
- dct[1][1] *= i_dmf;
+ const int i_dmf = dequant_mf[i_qp%6][0] << i_qbits;
+ for( int i = 0; i < 16; i++ )
+ dct[i] *= i_dmf;
}
else
{
- const int i_dmf = dequant_mf[i_qp%6][0][0];
- // chroma DC is truncated, not rounded
- dct[0][0] = ( dct[0][0] * i_dmf ) >> (-i_qbits);
- dct[0][1] = ( dct[0][1] * i_dmf ) >> (-i_qbits);
- dct[1][0] = ( dct[1][0] * i_dmf ) >> (-i_qbits);
- dct[1][1] = ( dct[1][1] * i_dmf ) >> (-i_qbits);
+ const int i_dmf = dequant_mf[i_qp%6][0];
+ const int f = 1 << (-i_qbits-1);
+ for( int i = 0; i < 16; i++ )
+ dct[i] = ( dct[i] * i_dmf + f ) >> (-i_qbits);
}
}
-void x264_mb_dequant_4x4_dc( int16_t dct[4][4], int dequant_mf[6][4][4], int i_qp )
+#define IDCT_DEQUANT_2X4_START \
+ int a0 = dct[0] + dct[1]; \
+ int a1 = dct[2] + dct[3]; \
+ int a2 = dct[4] + dct[5]; \
+ int a3 = dct[6] + dct[7]; \
+ int a4 = dct[0] - dct[1]; \
+ int a5 = dct[2] - dct[3]; \
+ int a6 = dct[4] - dct[5]; \
+ int a7 = dct[6] - dct[7]; \
+ int b0 = a0 + a1; \
+ int b1 = a2 + a3; \
+ int b2 = a4 + a5; \
+ int b3 = a6 + a7; \
+ int b4 = a0 - a1; \
+ int b5 = a2 - a3; \
+ int b6 = a4 - a5; \
+ int b7 = a6 - a7;
+
+static void idct_dequant_2x4_dc( dctcoef dct[8], dctcoef dct4x4[8][16], int dequant_mf[6][16], int i_qp )
+{
+ IDCT_DEQUANT_2X4_START
+ int dmf = dequant_mf[i_qp%6][0] << i_qp/6;
+ dct4x4[0][0] = ((b0 + b1) * dmf + 32) >> 6;
+ dct4x4[1][0] = ((b2 + b3) * dmf + 32) >> 6;
+ dct4x4[2][0] = ((b0 - b1) * dmf + 32) >> 6;
+ dct4x4[3][0] = ((b2 - b3) * dmf + 32) >> 6;
+ dct4x4[4][0] = ((b4 - b5) * dmf + 32) >> 6;
+ dct4x4[5][0] = ((b6 - b7) * dmf + 32) >> 6;
+ dct4x4[6][0] = ((b4 + b5) * dmf + 32) >> 6;
+ dct4x4[7][0] = ((b6 + b7) * dmf + 32) >> 6;
+}
+
+static void idct_dequant_2x4_dconly( dctcoef dct[8], int dequant_mf[6][16], int i_qp )
{
- const int i_qbits = i_qp/6 - 6;
- int y;
+ IDCT_DEQUANT_2X4_START
+ int dmf = dequant_mf[i_qp%6][0] << i_qp/6;
+ dct[0] = ((b0 + b1) * dmf + 32) >> 6;
+ dct[1] = ((b2 + b3) * dmf + 32) >> 6;
+ dct[2] = ((b0 - b1) * dmf + 32) >> 6;
+ dct[3] = ((b2 - b3) * dmf + 32) >> 6;
+ dct[4] = ((b4 - b5) * dmf + 32) >> 6;
+ dct[5] = ((b6 - b7) * dmf + 32) >> 6;
+ dct[6] = ((b4 + b5) * dmf + 32) >> 6;
+ dct[7] = ((b6 + b7) * dmf + 32) >> 6;
+}
- if( i_qbits >= 0 )
+static ALWAYS_INLINE void optimize_chroma_idct_dequant_2x4( dctcoef out[8], dctcoef dct[8], int dmf )
+{
+ IDCT_DEQUANT_2X4_START
+ out[0] = ((b0 + b1) * dmf + 2080) >> 6; /* 2080 = 32 + (32<<6) */
+ out[1] = ((b2 + b3) * dmf + 2080) >> 6;
+ out[2] = ((b0 - b1) * dmf + 2080) >> 6;
+ out[3] = ((b2 - b3) * dmf + 2080) >> 6;
+ out[4] = ((b4 - b5) * dmf + 2080) >> 6;
+ out[5] = ((b6 - b7) * dmf + 2080) >> 6;
+ out[6] = ((b4 + b5) * dmf + 2080) >> 6;
+ out[7] = ((b6 + b7) * dmf + 2080) >> 6;
+}
+#undef IDCT_DEQUANT_2X4_START
+
+static ALWAYS_INLINE void optimize_chroma_idct_dequant_2x2( dctcoef out[4], dctcoef dct[4], int dmf )
+{
+ int d0 = dct[0] + dct[1];
+ int d1 = dct[2] + dct[3];
+ int d2 = dct[0] - dct[1];
+ int d3 = dct[2] - dct[3];
+ out[0] = ((d0 + d1) * dmf >> 5) + 32;
+ out[1] = ((d0 - d1) * dmf >> 5) + 32;
+ out[2] = ((d2 + d3) * dmf >> 5) + 32;
+ out[3] = ((d2 - d3) * dmf >> 5) + 32;
+}
+
+static ALWAYS_INLINE int optimize_chroma_round( dctcoef *ref, dctcoef *dct, int dequant_mf, int chroma422 )
+{
+ dctcoef out[8];
+
+ if( chroma422 )
+ optimize_chroma_idct_dequant_2x4( out, dct, dequant_mf );
+ else
+ optimize_chroma_idct_dequant_2x2( out, dct, dequant_mf );
+
+ int sum = 0;
+ for( int i = 0; i < (chroma422?8:4); i++ )
+ sum |= ref[i] ^ out[i];
+ return sum >> 6;
+}
+
+static ALWAYS_INLINE int optimize_chroma_dc_internal( dctcoef *dct, int dequant_mf, int chroma422 )
+{
+ /* dequant_mf = h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << i_qp/6, max 32*64 */
+ dctcoef dct_orig[8];
+ int coeff, nz;
+
+ if( chroma422 )
+ optimize_chroma_idct_dequant_2x4( dct_orig, dct, dequant_mf );
+ else
+ optimize_chroma_idct_dequant_2x2( dct_orig, dct, dequant_mf );
+
+ /* If the DC coefficients already round to zero, terminate early. */
+ int sum = 0;
+ for( int i = 0; i < (chroma422?8:4); i++ )
+ sum |= dct_orig[i];
+ if( !(sum >> 6) )
+ return 0;
+
+ /* Start with the highest frequency coefficient... is this the best option? */
+ for( nz = 0, coeff = (chroma422?7:3); coeff >= 0; coeff-- )
{
- const int i_dmf = dequant_mf[i_qp%6][0][0] << i_qbits;
+ int level = dct[coeff];
+ int sign = level>>31 | 1; /* dct[coeff] < 0 ? -1 : 1 */
- for( y = 0; y < 4; y++ )
+ while( level )
{
- dct[y][0] *= i_dmf;
- dct[y][1] *= i_dmf;
- dct[y][2] *= i_dmf;
- dct[y][3] *= i_dmf;
+ dct[coeff] = level - sign;
+ if( optimize_chroma_round( dct_orig, dct, dequant_mf, chroma422 ) )
+ {
+ nz = 1;
+ dct[coeff] = level;
+ break;
+ }
+ level -= sign;
}
}
- else
+
+ return nz;
+}
+
+static int optimize_chroma_2x2_dc( dctcoef dct[4], int dequant_mf )
+{
+ return optimize_chroma_dc_internal( dct, dequant_mf, 0 );
+}
+
+static int optimize_chroma_2x4_dc( dctcoef dct[8], int dequant_mf )
+{
+ return optimize_chroma_dc_internal( dct, dequant_mf, 1 );
+}
+
+static void x264_denoise_dct( dctcoef *dct, uint32_t *sum, udctcoef *offset, int size )
+{
+ for( int i = 0; i < size; i++ )
{
- const int i_dmf = dequant_mf[i_qp%6][0][0];
- const int f = 1 << (-i_qbits-1);
+ int level = dct[i];
+ int sign = level>>31;
+ level = (level+sign)^sign;
+ sum[i] += level;
+ level -= offset[i];
+ dct[i] = level<0 ? 0 : (level^sign)-sign;
+ }
+}
- for( y = 0; y < 4; y++ )
+/* (ref: JVT-B118)
+ * x264_mb_decimate_score: given dct coeffs it returns a score to see if we could empty this dct coeffs
+ * to 0 (low score means set it to null)
+ * Used in inter macroblock (luma and chroma)
+ * luma: for a 8x8 block: if score < 4 -> null
+ * for the complete mb: if score < 6 -> null
+ * chroma: for the complete mb: if score < 7 -> null
+ */
+
+const uint8_t x264_decimate_table4[16] =
+{
+ 3,2,2,1,1,1,0,0,0,0,0,0,0,0,0,0
+};
+const uint8_t x264_decimate_table8[64] =
+{
+ 3,3,3,3,2,2,2,2,2,2,2,2,1,1,1,1,
+ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+};
+
+static int ALWAYS_INLINE x264_decimate_score_internal( dctcoef *dct, int i_max )
+{
+ const uint8_t *ds_table = (i_max == 64) ? x264_decimate_table8 : x264_decimate_table4;
+ int i_score = 0;
+ int idx = i_max - 1;
+
+ while( idx >= 0 && dct[idx] == 0 )
+ idx--;
+ while( idx >= 0 )
+ {
+ int i_run;
+
+ if( (unsigned)(dct[idx--] + 1) > 2 )
+ return 9;
+
+ i_run = 0;
+ while( idx >= 0 && dct[idx] == 0 )
{
- dct[y][0] = ( dct[y][0] * i_dmf + f ) >> (-i_qbits);
- dct[y][1] = ( dct[y][1] * i_dmf + f ) >> (-i_qbits);
- dct[y][2] = ( dct[y][2] * i_dmf + f ) >> (-i_qbits);
- dct[y][3] = ( dct[y][3] * i_dmf + f ) >> (-i_qbits);
+ idx--;
+ i_run++;
}
+ i_score += ds_table[i_run];
}
+
+ return i_score;
}
-void x264_quant_init( x264_t *h, int cpu, x264_quant_function_t *pf )
+static int x264_decimate_score15( dctcoef *dct )
+{
+ return x264_decimate_score_internal( dct+1, 15 );
+}
+static int x264_decimate_score16( dctcoef *dct )
+{
+ return x264_decimate_score_internal( dct, 16 );
+}
+static int x264_decimate_score64( dctcoef *dct )
{
- int i, maxQ8=0, maxQ4=0, maxQdc=0;
+ return x264_decimate_score_internal( dct, 64 );
+}
+
+#define last(num)\
+static int x264_coeff_last##num( dctcoef *l )\
+{\
+ int i_last = num-1;\
+ while( i_last >= 0 && l[i_last] == 0 )\
+ i_last--;\
+ return i_last;\
+}
+
+last(4)
+last(8)
+last(15)
+last(16)
+last(64)
+
+#define level_run(num)\
+static int x264_coeff_level_run##num( dctcoef *dct, x264_run_level_t *runlevel )\
+{\
+ int i_last = runlevel->last = x264_coeff_last##num(dct);\
+ int i_total = 0;\
+ int mask = 0;\
+ do\
+ {\
+ runlevel->level[i_total++] = dct[i_last];\
+ mask |= 1 << (i_last);\
+ while( --i_last >= 0 && dct[i_last] == 0 );\
+ } while( i_last >= 0 );\
+ runlevel->mask = mask;\
+ return i_total;\
+}
+
+level_run(4)
+level_run(8)
+level_run(15)
+level_run(16)
- pf->quant_8x8_core = quant_8x8_core;
- pf->quant_4x4_core = quant_4x4_core;
- pf->quant_4x4_dc_core = quant_4x4_dc_core;
- pf->quant_2x2_dc_core = quant_2x2_dc_core;
+#if ARCH_X86_64
+#define INIT_TRELLIS(cpu)\
+ pf->trellis_cabac_4x4 = x264_trellis_cabac_4x4_##cpu;\
+ pf->trellis_cabac_8x8 = x264_trellis_cabac_8x8_##cpu;\
+ pf->trellis_cabac_4x4_psy = x264_trellis_cabac_4x4_psy_##cpu;\
+ pf->trellis_cabac_8x8_psy = x264_trellis_cabac_8x8_psy_##cpu;\
+ pf->trellis_cabac_dc = x264_trellis_cabac_dc_##cpu;\
+ pf->trellis_cabac_chroma_422_dc = x264_trellis_cabac_chroma_422_dc_##cpu;
+#else
+#define INIT_TRELLIS(...)
+#endif
+
+void x264_quant_init( x264_t *h, int cpu, x264_quant_function_t *pf )
+{
+ pf->quant_8x8 = quant_8x8;
+ pf->quant_4x4 = quant_4x4;
+ pf->quant_4x4x4 = quant_4x4x4;
+ pf->quant_4x4_dc = quant_4x4_dc;
+ pf->quant_2x2_dc = quant_2x2_dc;
pf->dequant_4x4 = dequant_4x4;
+ pf->dequant_4x4_dc = dequant_4x4_dc;
pf->dequant_8x8 = dequant_8x8;
-#ifdef HAVE_MMXEXT
+ pf->idct_dequant_2x4_dc = idct_dequant_2x4_dc;
+ pf->idct_dequant_2x4_dconly = idct_dequant_2x4_dconly;
+
+ pf->optimize_chroma_2x2_dc = optimize_chroma_2x2_dc;
+ pf->optimize_chroma_2x4_dc = optimize_chroma_2x4_dc;
+
+ pf->denoise_dct = x264_denoise_dct;
+ pf->decimate_score15 = x264_decimate_score15;
+ pf->decimate_score16 = x264_decimate_score16;
+ pf->decimate_score64 = x264_decimate_score64;
- /* determine the biggest coeffient in all quant8_mf tables */
- for( i = 0; i < 2*6*8*8; i++ )
+ pf->coeff_last4 = x264_coeff_last4;
+ pf->coeff_last8 = x264_coeff_last8;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15;
+ pf->coeff_last[ DCT_LUMA_4x4] = x264_coeff_last16;
+ pf->coeff_last[ DCT_LUMA_8x8] = x264_coeff_last64;
+ pf->coeff_level_run4 = x264_coeff_level_run4;
+ pf->coeff_level_run8 = x264_coeff_level_run8;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15;
+ pf->coeff_level_run[ DCT_LUMA_4x4] = x264_coeff_level_run16;
+
+#if HIGH_BIT_DEPTH
+#if HAVE_MMX
+ INIT_TRELLIS( sse2 );
+ if( cpu&X264_CPU_MMX2 )
+ {
+#if ARCH_X86
+ pf->denoise_dct = x264_denoise_dct_mmx;
+ pf->decimate_score15 = x264_decimate_score15_mmx2;
+ pf->decimate_score16 = x264_decimate_score16_mmx2;
+ pf->decimate_score64 = x264_decimate_score64_mmx2;
+ pf->coeff_last8 = x264_coeff_last8_mmx2;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_mmx2;
+ pf->coeff_last[ DCT_LUMA_4x4] = x264_coeff_last16_mmx2;
+ pf->coeff_last[ DCT_LUMA_8x8] = x264_coeff_last64_mmx2;
+ pf->coeff_level_run8 = x264_coeff_level_run8_mmx2;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_mmx2;
+ pf->coeff_level_run[ DCT_LUMA_4x4] = x264_coeff_level_run16_mmx2;
+#endif
+ pf->coeff_last4 = x264_coeff_last4_mmx2;
+ pf->coeff_level_run4 = x264_coeff_level_run4_mmx2;
+ if( cpu&X264_CPU_LZCNT )
+ pf->coeff_level_run4 = x264_coeff_level_run4_mmx2_lzcnt;
+ }
+ if( cpu&X264_CPU_SSE2 )
+ {
+ pf->quant_4x4 = x264_quant_4x4_sse2;
+ pf->quant_4x4x4 = x264_quant_4x4x4_sse2;
+ pf->quant_8x8 = x264_quant_8x8_sse2;
+ pf->quant_2x2_dc = x264_quant_2x2_dc_sse2;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_sse2;
+ pf->dequant_4x4 = x264_dequant_4x4_sse2;
+ pf->dequant_8x8 = x264_dequant_8x8_sse2;
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_sse2;
+ pf->idct_dequant_2x4_dc = x264_idct_dequant_2x4_dc_sse2;
+ pf->idct_dequant_2x4_dconly = x264_idct_dequant_2x4_dconly_sse2;
+ pf->denoise_dct = x264_denoise_dct_sse2;
+ pf->decimate_score15 = x264_decimate_score15_sse2;
+ pf->decimate_score16 = x264_decimate_score16_sse2;
+ pf->decimate_score64 = x264_decimate_score64_sse2;
+ pf->coeff_last8 = x264_coeff_last8_sse2;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_sse2;
+ pf->coeff_last[DCT_LUMA_4x4] = x264_coeff_last16_sse2;
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_sse2;
+ pf->coeff_level_run8 = x264_coeff_level_run8_sse2;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_sse2;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_sse2;
+ if( cpu&X264_CPU_LZCNT )
+ {
+ pf->coeff_last4 = x264_coeff_last4_mmx2_lzcnt;
+ pf->coeff_last8 = x264_coeff_last8_sse2_lzcnt;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_sse2_lzcnt;
+ pf->coeff_last[DCT_LUMA_4x4] = x264_coeff_last16_sse2_lzcnt;
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_sse2_lzcnt;
+ pf->coeff_level_run8 = x264_coeff_level_run8_sse2_lzcnt;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_sse2_lzcnt;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_sse2_lzcnt;
+ }
+ }
+ if( cpu&X264_CPU_SSSE3 )
+ {
+ pf->quant_4x4 = x264_quant_4x4_ssse3;
+ pf->quant_4x4x4 = x264_quant_4x4x4_ssse3;
+ pf->quant_8x8 = x264_quant_8x8_ssse3;
+ pf->quant_2x2_dc = x264_quant_2x2_dc_ssse3;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_ssse3;
+ pf->denoise_dct = x264_denoise_dct_ssse3;
+ pf->decimate_score15 = x264_decimate_score15_ssse3;
+ pf->decimate_score16 = x264_decimate_score16_ssse3;
+ pf->decimate_score64 = x264_decimate_score64_ssse3;
+ INIT_TRELLIS( ssse3 );
+ }
+ if( cpu&X264_CPU_SSE4 )
+ {
+ pf->quant_2x2_dc = x264_quant_2x2_dc_sse4;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_sse4;
+ pf->quant_4x4 = x264_quant_4x4_sse4;
+ pf->quant_4x4x4 = x264_quant_4x4x4_sse4;
+ pf->quant_8x8 = x264_quant_8x8_sse4;
+ }
+ if( cpu&X264_CPU_AVX )
+ {
+ pf->idct_dequant_2x4_dc = x264_idct_dequant_2x4_dc_avx;
+ pf->idct_dequant_2x4_dconly = x264_idct_dequant_2x4_dconly_avx;
+ pf->denoise_dct = x264_denoise_dct_avx;
+ }
+ if( cpu&X264_CPU_XOP )
+ {
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_xop;
+ if( h->param.i_cqm_preset != X264_CQM_FLAT )
+ {
+ pf->dequant_4x4 = x264_dequant_4x4_xop;
+ pf->dequant_8x8 = x264_dequant_8x8_xop;
+ }
+ }
+ if( cpu&X264_CPU_AVX2 )
{
- int q = h->quant8_mf[0][0][0][i];
- if( maxQ8 < q )
- maxQ8 = q;
+ pf->quant_4x4 = x264_quant_4x4_avx2;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_avx2;
+ pf->quant_8x8 = x264_quant_8x8_avx2;
+ pf->quant_4x4x4 = x264_quant_4x4x4_avx2;
+ pf->dequant_4x4 = x264_dequant_4x4_avx2;
+ pf->dequant_8x8 = x264_dequant_8x8_avx2;
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_avx2;
+ pf->denoise_dct = x264_denoise_dct_avx2;
+ if( cpu&X264_CPU_LZCNT )
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_avx2_lzcnt;
+ }
+#endif // HAVE_MMX
+#else // !HIGH_BIT_DEPTH
+#if HAVE_MMX
+ INIT_TRELLIS( sse2 );
+ if( cpu&X264_CPU_MMX )
+ {
+#if ARCH_X86
+ pf->dequant_4x4 = x264_dequant_4x4_mmx;
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_mmx2;
+ pf->dequant_8x8 = x264_dequant_8x8_mmx;
+ if( h->param.i_cqm_preset == X264_CQM_FLAT )
+ {
+ pf->dequant_4x4 = x264_dequant_4x4_flat16_mmx;
+ pf->dequant_8x8 = x264_dequant_8x8_flat16_mmx;
+ }
+ pf->denoise_dct = x264_denoise_dct_mmx;
+#endif
}
- /* determine the biggest coeffient in all quant4_mf tables ( maxQ4 )
- and the biggest DC coefficient if all quant4_mf tables ( maxQdc ) */
- for( i = 0; i < 4*6*4*4; i++ )
+ if( cpu&X264_CPU_MMX2 )
{
- int q = h->quant4_mf[0][0][0][i];
- if( maxQ4 < q )
- maxQ4 = q;
- if( maxQdc < q && i%16 == 0 )
- maxQdc = q;
+ pf->quant_2x2_dc = x264_quant_2x2_dc_mmx2;
+#if ARCH_X86
+ pf->quant_4x4 = x264_quant_4x4_mmx2;
+ pf->quant_8x8 = x264_quant_8x8_mmx2;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_mmx2;
+ pf->decimate_score15 = x264_decimate_score15_mmx2;
+ pf->decimate_score16 = x264_decimate_score16_mmx2;
+ pf->decimate_score64 = x264_decimate_score64_mmx2;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_mmx2;
+ pf->coeff_last[ DCT_LUMA_4x4] = x264_coeff_last16_mmx2;
+ pf->coeff_last[ DCT_LUMA_8x8] = x264_coeff_last64_mmx2;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_mmx2;
+ pf->coeff_level_run[ DCT_LUMA_4x4] = x264_coeff_level_run16_mmx2;
+#endif
+ pf->coeff_last4 = x264_coeff_last4_mmx2;
+ pf->coeff_last8 = x264_coeff_last8_mmx2;
+ pf->coeff_level_run4 = x264_coeff_level_run4_mmx2;
+ pf->coeff_level_run8 = x264_coeff_level_run8_mmx2;
+ if( cpu&X264_CPU_LZCNT )
+ {
+ pf->coeff_last4 = x264_coeff_last4_mmx2_lzcnt;
+ pf->coeff_last8 = x264_coeff_last8_mmx2_lzcnt;
+ pf->coeff_level_run4 = x264_coeff_level_run4_mmx2_lzcnt;
+ pf->coeff_level_run8 = x264_coeff_level_run8_mmx2_lzcnt;
+ }
}
- /* select quant_8x8 based on CPU and maxQ8 */
- if( maxQ8 < (1<<15) && cpu&X264_CPU_MMX )
- pf->quant_8x8_core = x264_quant_8x8_core15_mmx;
- else
- if( maxQ8 < (1<<16) && cpu&X264_CPU_MMXEXT )
- pf->quant_8x8_core = x264_quant_8x8_core16_mmxext;
- else
- if( cpu&X264_CPU_MMXEXT )
- pf->quant_8x8_core = x264_quant_8x8_core32_mmxext;
+ if( cpu&X264_CPU_SSE2 )
+ {
+ pf->quant_4x4_dc = x264_quant_4x4_dc_sse2;
+ pf->quant_4x4 = x264_quant_4x4_sse2;
+ pf->quant_4x4x4 = x264_quant_4x4x4_sse2;
+ pf->quant_8x8 = x264_quant_8x8_sse2;
+ pf->dequant_4x4 = x264_dequant_4x4_sse2;
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_sse2;
+ pf->dequant_8x8 = x264_dequant_8x8_sse2;
+ if( h->param.i_cqm_preset == X264_CQM_FLAT )
+ {
+ pf->dequant_4x4 = x264_dequant_4x4_flat16_sse2;
+ pf->dequant_8x8 = x264_dequant_8x8_flat16_sse2;
+ }
+ pf->idct_dequant_2x4_dc = x264_idct_dequant_2x4_dc_sse2;
+ pf->idct_dequant_2x4_dconly = x264_idct_dequant_2x4_dconly_sse2;
+ pf->optimize_chroma_2x2_dc = x264_optimize_chroma_2x2_dc_sse2;
+ pf->denoise_dct = x264_denoise_dct_sse2;
+ pf->decimate_score15 = x264_decimate_score15_sse2;
+ pf->decimate_score16 = x264_decimate_score16_sse2;
+ pf->decimate_score64 = x264_decimate_score64_sse2;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_sse2;
+ pf->coeff_last[DCT_LUMA_4x4] = x264_coeff_last16_sse2;
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_sse2;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_sse2;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_sse2;
+ if( cpu&X264_CPU_LZCNT )
+ {
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_sse2_lzcnt;
+ pf->coeff_last[DCT_LUMA_4x4] = x264_coeff_last16_sse2_lzcnt;
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_sse2_lzcnt;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_sse2_lzcnt;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_sse2_lzcnt;
+ }
+ }
- /* select quant_4x4 based on CPU and maxQ4 */
- if( maxQ4 < (1<<15) && cpu&X264_CPU_MMX )
- pf->quant_4x4_core = x264_quant_4x4_core15_mmx;
- else
- if( maxQ4 < (1<<16) && cpu&X264_CPU_MMXEXT )
- pf->quant_4x4_core = x264_quant_4x4_core16_mmxext;
- else
- if( cpu&X264_CPU_MMXEXT )
- pf->quant_4x4_core = x264_quant_4x4_core32_mmxext;
+ if( cpu&X264_CPU_SSSE3 )
+ {
+ pf->quant_2x2_dc = x264_quant_2x2_dc_ssse3;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_ssse3;
+ pf->quant_4x4 = x264_quant_4x4_ssse3;
+ pf->quant_4x4x4 = x264_quant_4x4x4_ssse3;
+ pf->quant_8x8 = x264_quant_8x8_ssse3;
+ pf->optimize_chroma_2x2_dc = x264_optimize_chroma_2x2_dc_ssse3;
+ pf->denoise_dct = x264_denoise_dct_ssse3;
+ pf->decimate_score15 = x264_decimate_score15_ssse3;
+ pf->decimate_score16 = x264_decimate_score16_ssse3;
+ pf->decimate_score64 = x264_decimate_score64_ssse3;
+ INIT_TRELLIS( ssse3 );
+ pf->coeff_level_run4 = x264_coeff_level_run4_ssse3;
+ pf->coeff_level_run8 = x264_coeff_level_run8_ssse3;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_ssse3;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_ssse3;
+ if( cpu&X264_CPU_LZCNT )
+ {
+            pf->coeff_level_run4 = x264_coeff_level_run4_ssse3_lzcnt;
+            pf->coeff_level_run8 = x264_coeff_level_run8_ssse3_lzcnt;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_ssse3_lzcnt;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_ssse3_lzcnt;
+ }
+ }
- /* select quant_XxX_dc based on CPU and maxQdc */
- if( maxQdc < (1<<16) && cpu&X264_CPU_MMXEXT )
+ if( cpu&X264_CPU_SSE4 )
{
- pf->quant_4x4_dc_core = x264_quant_4x4_dc_core16_mmxext;
- pf->quant_2x2_dc_core = x264_quant_2x2_dc_core16_mmxext;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_sse4;
+ pf->quant_4x4 = x264_quant_4x4_sse4;
+ pf->quant_8x8 = x264_quant_8x8_sse4;
+ pf->optimize_chroma_2x2_dc = x264_optimize_chroma_2x2_dc_sse4;
}
- else
- if( maxQdc < (1<<15) && cpu&X264_CPU_MMX )
+
+ if( cpu&X264_CPU_AVX )
{
- pf->quant_4x4_dc_core = x264_quant_4x4_dc_core15_mmx;
- pf->quant_2x2_dc_core = x264_quant_2x2_dc_core15_mmx;
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_avx;
+ if( h->param.i_cqm_preset != X264_CQM_FLAT )
+ {
+ pf->dequant_4x4 = x264_dequant_4x4_avx;
+ pf->dequant_8x8 = x264_dequant_8x8_avx;
+ }
+ pf->idct_dequant_2x4_dc = x264_idct_dequant_2x4_dc_avx;
+ pf->idct_dequant_2x4_dconly = x264_idct_dequant_2x4_dconly_avx;
+ pf->optimize_chroma_2x2_dc = x264_optimize_chroma_2x2_dc_avx;
+ pf->denoise_dct = x264_denoise_dct_avx;
}
- else
- if( cpu&X264_CPU_MMXEXT )
+
+ if( cpu&X264_CPU_XOP )
{
- pf->quant_4x4_dc_core = x264_quant_4x4_dc_core32_mmxext;
- pf->quant_2x2_dc_core = x264_quant_2x2_dc_core32_mmxext;
+ if( h->param.i_cqm_preset != X264_CQM_FLAT )
+ {
+ pf->dequant_4x4 = x264_dequant_4x4_xop;
+ pf->dequant_8x8 = x264_dequant_8x8_xop;
+ }
}
- if( cpu&X264_CPU_MMX )
+ if( cpu&X264_CPU_AVX2 )
{
- /* dequant is not subject to the above CQM-dependent overflow issues,
- * as long as the inputs are in the range generable by dct+quant.
- * that is not guaranteed by the standard, but is true within x264 */
- pf->dequant_4x4 = x264_dequant_4x4_mmx;
- pf->dequant_8x8 = x264_dequant_8x8_mmx;
+ pf->quant_4x4 = x264_quant_4x4_avx2;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_avx2;
+ pf->quant_8x8 = x264_quant_8x8_avx2;
+ pf->quant_4x4x4 = x264_quant_4x4x4_avx2;
+ pf->dequant_4x4 = x264_dequant_4x4_avx2;
+ pf->dequant_8x8 = x264_dequant_8x8_avx2;
+ pf->dequant_4x4_dc = x264_dequant_4x4dc_avx2;
+ if( h->param.i_cqm_preset == X264_CQM_FLAT )
+ {
+ pf->dequant_4x4 = x264_dequant_4x4_flat16_avx2;
+ pf->dequant_8x8 = x264_dequant_8x8_flat16_avx2;
+ }
+ pf->decimate_score64 = x264_decimate_score64_avx2;
+ pf->denoise_dct = x264_denoise_dct_avx2;
+ if( cpu&X264_CPU_LZCNT )
+ {
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_avx2_lzcnt;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_avx2_lzcnt;
+ pf->coeff_level_run[DCT_LUMA_4x4] = x264_coeff_level_run16_avx2_lzcnt;
+ }
}
-#endif /* HAVE_MMXEXT */
+#endif // HAVE_MMX
+
+#if HAVE_ALTIVEC
+ if( cpu&X264_CPU_ALTIVEC )
+ {
+ pf->quant_2x2_dc = x264_quant_2x2_dc_altivec;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_altivec;
+ pf->quant_4x4 = x264_quant_4x4_altivec;
+ pf->quant_8x8 = x264_quant_8x8_altivec;
+
+ pf->dequant_4x4 = x264_dequant_4x4_altivec;
+ pf->dequant_8x8 = x264_dequant_8x8_altivec;
+ }
+#endif
+
+#if HAVE_ARMV6
+ if( cpu&X264_CPU_ARMV6 )
+ {
+ pf->coeff_last4 = x264_coeff_last4_arm;
+ pf->coeff_last8 = x264_coeff_last8_arm;
+ }
+#endif
+#if HAVE_ARMV6 || ARCH_AARCH64
+ if( cpu&X264_CPU_NEON )
+ {
+ pf->quant_2x2_dc = x264_quant_2x2_dc_neon;
+ pf->quant_4x4 = x264_quant_4x4_neon;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_neon;
+ pf->quant_4x4x4 = x264_quant_4x4x4_neon;
+ pf->quant_8x8 = x264_quant_8x8_neon;
+ pf->dequant_4x4 = x264_dequant_4x4_neon;
+ pf->dequant_4x4_dc = x264_dequant_4x4_dc_neon;
+ pf->dequant_8x8 = x264_dequant_8x8_neon;
+ pf->coeff_last[ DCT_LUMA_AC] = x264_coeff_last15_neon;
+ pf->coeff_last[DCT_LUMA_4x4] = x264_coeff_last16_neon;
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_neon;
+ pf->denoise_dct = x264_denoise_dct_neon;
+ pf->decimate_score15 = x264_decimate_score15_neon;
+ pf->decimate_score16 = x264_decimate_score16_neon;
+ pf->decimate_score64 = x264_decimate_score64_neon;
+ }
+#endif
+#if ARCH_AARCH64
+ if( cpu&X264_CPU_ARMV8 )
+ {
+ pf->coeff_last4 = x264_coeff_last4_aarch64;
+ pf->coeff_last8 = x264_coeff_last8_aarch64;
+ pf->coeff_level_run4 = x264_coeff_level_run4_aarch64;
+ }
+ if( cpu&X264_CPU_NEON )
+ {
+ pf->coeff_level_run8 = x264_coeff_level_run8_neon;
+ pf->coeff_level_run[ DCT_LUMA_AC] = x264_coeff_level_run15_neon;
+ pf->coeff_level_run[ DCT_LUMA_4x4] = x264_coeff_level_run16_neon;
+ }
+#endif
+
+#if HAVE_MSA
+ if( cpu&X264_CPU_MSA )
+ {
+ pf->quant_4x4 = x264_quant_4x4_msa;
+ pf->quant_4x4_dc = x264_quant_4x4_dc_msa;
+ pf->quant_4x4x4 = x264_quant_4x4x4_msa;
+ pf->quant_8x8 = x264_quant_8x8_msa;
+ pf->dequant_4x4 = x264_dequant_4x4_msa;
+ pf->dequant_4x4_dc = x264_dequant_4x4_dc_msa;
+ pf->dequant_8x8 = x264_dequant_8x8_msa;
+ pf->coeff_last[DCT_LUMA_4x4] = x264_coeff_last16_msa;
+ pf->coeff_last[DCT_LUMA_8x8] = x264_coeff_last64_msa;
+ }
+#endif
+#endif // HIGH_BIT_DEPTH
+ pf->coeff_last[DCT_LUMA_DC] = pf->coeff_last[DCT_CHROMAU_DC] = pf->coeff_last[DCT_CHROMAV_DC] =
+ pf->coeff_last[DCT_CHROMAU_4x4] = pf->coeff_last[DCT_CHROMAV_4x4] = pf->coeff_last[DCT_LUMA_4x4];
+ pf->coeff_last[DCT_CHROMA_AC] = pf->coeff_last[DCT_CHROMAU_AC] =
+ pf->coeff_last[DCT_CHROMAV_AC] = pf->coeff_last[DCT_LUMA_AC];
+ pf->coeff_last[DCT_CHROMAU_8x8] = pf->coeff_last[DCT_CHROMAV_8x8] = pf->coeff_last[DCT_LUMA_8x8];
+
+ pf->coeff_level_run[DCT_LUMA_DC] = pf->coeff_level_run[DCT_CHROMAU_DC] = pf->coeff_level_run[DCT_CHROMAV_DC] =
+ pf->coeff_level_run[DCT_CHROMAU_4x4] = pf->coeff_level_run[DCT_CHROMAV_4x4] = pf->coeff_level_run[DCT_LUMA_4x4];
+ pf->coeff_level_run[DCT_CHROMA_AC] = pf->coeff_level_run[DCT_CHROMAU_AC] =
+ pf->coeff_level_run[DCT_CHROMAV_AC] = pf->coeff_level_run[DCT_LUMA_AC];
}