/*****************************************************************************
- * rdo.c: h264 encoder library (rate-distortion optimization)
+ * rdo.c: rate-distortion optimization
*****************************************************************************
- * Copyright (C) 2005 x264 project
+ * Copyright (C) 2005-2013 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
+ * Fiona Glaser <fiona@x264.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
/* duplicate all the writer functions, just calculating bit cost
* instead of writing the bitstream.
* TODO: use these for fast 1st pass too. */
-#define RDO_SKIP_BS
+#define RDO_SKIP_BS 1
+
+/* Transition and size tables for abs<9 MVD and residual coding */
+/* Consist of i_prefix-2 1s, one zero, and a bypass sign bit */
+uint8_t x264_cabac_transition_unary[15][128];
+uint16_t x264_cabac_size_unary[15][128];
+/* Transition and size tables for abs>9 MVD */
+/* Consist of 5 1s and a bypass sign bit */
+static uint8_t cabac_transition_5ones[128];
+static uint16_t cabac_size_5ones[128];
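+/* x264_cabac_size_unary[prefix][ctx] is the cost, in 1/256-bit units, of coding
+ * prefix-1 1s, a terminating 0 (when 0 < prefix < 14) and the bypass sign bit
+ * from 7-bit context state ctx; x264_cabac_transition_unary is the state left
+ * behind. Both are filled in by x264_rdo_init() below. */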
/* CAVLC: produces exactly the same bit count as a normal encode */
/* this probably still leaves some unnecessary computations */
#define bs_write_ue(s,v) ((s)->i_bits_encoded += bs_size_ue(v))
#define bs_write_se(s,v) ((s)->i_bits_encoded += bs_size_se(v))
#define bs_write_te(s,v,l) ((s)->i_bits_encoded += bs_size_te(v,l))
-#define x264_macroblock_write_cavlc x264_macroblock_size_cavlc
+#define x264_macroblock_write_cavlc static x264_macroblock_size_cavlc
#include "cavlc.c"
/* CABAC: not exactly the same. x264_cabac_size_decision() keeps track of
* fractional bits, but only finite precision. */
+#undef x264_cabac_encode_decision
+#undef x264_cabac_encode_decision_noup
+#undef x264_cabac_encode_bypass
+#undef x264_cabac_encode_terminal
#define x264_cabac_encode_decision(c,x,v) x264_cabac_size_decision(c,x,v)
-#define x264_cabac_encode_terminal(c,v) x264_cabac_size_decision(c,276,v)
+#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
+#define x264_cabac_encode_terminal(c) ((c)->f8_bits_encoded += 7)
#define x264_cabac_encode_bypass(c,v) ((c)->f8_bits_encoded += 256)
-#define x264_cabac_encode_flush(c)
-#define x264_macroblock_write_cabac x264_macroblock_size_cabac
-#define x264_cabac_mb_skip x264_cabac_mb_size_skip_unused
+#define x264_cabac_encode_ue_bypass(c,e,v) ((c)->f8_bits_encoded += (bs_size_ue_big(v+(1<<e)-1)-e)<<8)
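+/* In these size-counting stand-ins, f8_bits_encoded accumulates bits in 1/256-bit
+ * units: a bypass bin costs exactly 256, a context-coded bin costs its fractional
+ * entropy, and the near-certain end-of-slice terminal is approximated as 7/256. */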
+#define x264_macroblock_write_cabac static x264_macroblock_size_cabac
#include "cabac.c"
+#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
+ sizeof(x264_cabac_t) - offsetof(x264_cabac_t,f8_bits_encoded) - (CHROMA444 ? 0 : (1024+12)-460) )
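+/* COPY_CABAC snapshots only what size-counting needs: everything from the bit
+ * counter onward. Outside 4:4:4 the tail of the context array (past the first 460
+ * states) belongs to 4:4:4-only residual categories, so it can be skipped. */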
+#define COPY_CABAC_PART( pos, size )\
+ memcpy( &cb->state[pos], &h->cabac.state[pos], size )
+
+static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
+{
+ static const uint8_t hadamard_shift_x[4] = {4, 4, 3, 3};
+ static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
+ static const uint8_t hadamard_offset[4] = {0, 1, 3, 5};
+ int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
+ + hadamard_offset[size];
+ uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
+ if( res )
+ return res - 1;
+ else
+ {
+ pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
+ res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
+ h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
+ return res;
+ }
+}
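+/* The cache stores res+1 so that 0 can double as "not computed yet"; hits return
+ * the stored value minus one. hadamard_ac packs two AC-complexity sums (SATD- and
+ * SA8D-based) into the two 32-bit halves of its result, which is why ssd_plane
+ * below diffs both halves and averages them. */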
+
+static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
+{
+ static const uint8_t satd_shift_x[3] = {3, 2, 2};
+ static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
+ static const uint8_t satd_offset[3] = {0, 8, 16};
+ ALIGNED_16( static pixel zero[16] ) = {0};
+ int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
+ + satd_offset[size - PIXEL_8x4];
+ int res = h->mb.pic.fenc_satd_cache[cache_index];
+ if( res )
+ return res - 1;
+ else
+ {
+ pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
+ int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, zero, 0 ) >> 1;
+ res = h->pixf.satd[size]( fenc, FENC_STRIDE, zero, 0 ) - dc;
+ h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
+ return res;
+ }
+}
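+/* SATD against an all-zero block includes the DC coefficient; since SATD scales
+ * the transform by 1/2, half the SAD is exactly that DC term, and subtracting it
+ * leaves an AC-only complexity measure for psy-RD. */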
+
+/* Psy RD distortion metric: SSD plus "Absolute Difference of Complexities" */
+/* SATD and SA8D are used to measure block complexity. */
+/* The differences in both the SATD and SA8D scores are used, to avoid bias from the DCT size: using SATD */
+/* only, for example, results in overuse of 8x8dct, while the opposite occurs when using SA8D. */
-static int ssd_mb( x264_t *h )
+/* FIXME: Is there a better metric than averaged SATD/SA8D difference for complexity difference? */
+/* Hadamard transform is recursive, so a SATD+SA8D can be done faster by taking advantage of this fact. */
+/* This optimization can also be used in non-RD transform decision. */
+
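+/* Illustration with made-up numbers: if the source block's AC complexity is 400
+ * and the reconstruction's is 250, the psy term adds |400-250| = 150, scaled by
+ * psy-RD strength and lambda in 1/256 units, on top of the raw SSD; a decode that
+ * keeps the block "busy" is penalized less than one that blurs it. */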
+static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
- return h->pixf.ssd[PIXEL_16x16]( h->mb.pic.p_fenc[0], FENC_STRIDE,
- h->mb.pic.p_fdec[0], FDEC_STRIDE )
- + h->pixf.ssd[PIXEL_8x8]( h->mb.pic.p_fenc[1], FENC_STRIDE,
- h->mb.pic.p_fdec[1], FDEC_STRIDE )
- + h->pixf.ssd[PIXEL_8x8]( h->mb.pic.p_fenc[2], FENC_STRIDE,
- h->mb.pic.p_fdec[2], FDEC_STRIDE );
+ ALIGNED_16( static pixel zero[16] ) = {0};
+ int satd = 0;
+ pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
+ pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
+ if( p == 0 && h->mb.i_psy_rd )
+ {
+ /* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
+ if( size <= PIXEL_8x8 )
+ {
+ uint64_t fdec_acs = h->pixf.hadamard_ac[size]( fdec, FDEC_STRIDE );
+ uint64_t fenc_acs = cached_hadamard( h, size, x, y );
+ satd = abs((int32_t)fdec_acs - (int32_t)fenc_acs)
+ + abs((int32_t)(fdec_acs>>32) - (int32_t)(fenc_acs>>32));
+ satd >>= 1;
+ }
+ else
+ {
+ int dc = h->pixf.sad[size]( fdec, FDEC_STRIDE, zero, 0 ) >> 1;
+ satd = abs(h->pixf.satd[size]( fdec, FDEC_STRIDE, zero, 0 ) - dc - cached_satd( h, size, x, y ));
+ }
+ satd = (satd * h->mb.i_psy_rd * h->mb.i_psy_rd_lambda + 128) >> 8;
+ }
+ return h->pixf.ssd[size](fenc, FENC_STRIDE, fdec, FDEC_STRIDE) + satd;
}
-static int ssd_plane( x264_t *h, int size, int p, int x, int y )
+static inline int ssd_mb( x264_t *h )
{
- return h->pixf.ssd[size]( h->mb.pic.p_fenc[p] + x+y*FENC_STRIDE, FENC_STRIDE,
- h->mb.pic.p_fdec[p] + x+y*FDEC_STRIDE, FDEC_STRIDE );
+ int chroma_size = h->luma2chroma_pixel[PIXEL_16x16];
+ int chroma_ssd = ssd_plane(h, chroma_size, 1, 0, 0) + ssd_plane(h, chroma_size, 2, 0, 0);
+ chroma_ssd = ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ return ssd_plane(h, PIXEL_16x16, 0, 0, 0) + chroma_ssd;
}
static int x264_rd_cost_mb( x264_t *h, int i_lambda2 )
{
int b_transform_bak = h->mb.b_transform_8x8;
int i_ssd;
int i_bits;
+ int type_bak = h->mb.i_type;
x264_macroblock_encode( h );
+ if( h->mb.b_deblock_rdo )
+ x264_macroblock_deblock( h );
+
i_ssd = ssd_mb( h );
if( IS_SKIP( h->mb.i_type ) )
{
- i_bits = 1 * i_lambda2;
+ i_bits = (1 * i_lambda2 + 128) >> 8;
}
else if( h->param.b_cabac )
{
- x264_cabac_t cabac_tmp = h->cabac;
- cabac_tmp.f8_bits_encoded = 0;
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
x264_macroblock_size_cabac( h, &cabac_tmp );
- i_bits = ( cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 32768 ) >> 16;
}
else
{
- bs_t bs_tmp = h->out.bs;
- bs_tmp.i_bits_encoded = 0;
- x264_macroblock_size_cavlc( h, &bs_tmp );
- i_bits = bs_tmp.i_bits_encoded * i_lambda2;
+ x264_macroblock_size_cavlc( h );
+ i_bits = ( h->out.bs.i_bits_encoded * i_lambda2 + 128 ) >> 8;
}
h->mb.b_transform_8x8 = b_transform_bak;
+ h->mb.i_type = type_bak;
return i_ssd + i_bits;
}
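+/* Fixed-point note: i_lambda2 carries 8 fractional bits, and CABAC's
+ * f8_bits_encoded carries 8 more, hence the >>16 with 32768 rounding above,
+ * versus >>8 for the whole-bit CAVLC and skip cases. */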
-int x264_rd_cost_part( x264_t *h, int i_lambda2, int i8, int i_pixel )
+/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
+
+static uint64_t x264_rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
{
- int i_ssd, i_bits;
+ uint64_t i_ssd, i_bits;
+
+ x264_macroblock_encode_p4x4( h, i4 );
+ if( i_pixel == PIXEL_8x4 )
+ x264_macroblock_encode_p4x4( h, i4+1 );
+ if( i_pixel == PIXEL_4x8 )
+ x264_macroblock_encode_p4x4( h, i4+2 );
+
+ i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ if( CHROMA444 )
+ {
+ int chromassd = ssd_plane( h, i_pixel, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
+ + ssd_plane( h, i_pixel, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ i_ssd += chromassd;
+ }
+
+ if( h->param.b_cabac )
+ {
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
+ x264_subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ }
+ else
+ i_bits = x264_subpartition_size_cavlc( h, i4, i_pixel ) * i_lambda2;
+
+ return (i_ssd<<8) + i_bits;
+}
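+/* Packing the score as (i_ssd<<8) + i_bits keeps the rate term's 8 fractional
+ * bits, so sub-partition scores can be summed and compared without the rounding
+ * error an early >>8 would introduce (see the precision comment above). */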
+
+uint64_t x264_rd_cost_part( x264_t *h, int i_lambda2, int i4, int i_pixel )
+{
+ uint64_t i_ssd, i_bits;
+ int i8 = i4 >> 2;
if( i_pixel == PIXEL_16x16 )
{
- int type_bak = h->mb.i_type;
int i_cost = x264_rd_cost_mb( h, i_lambda2 );
- h->mb.i_type = type_bak;
return i_cost;
}
+ if( i_pixel > PIXEL_8x8 )
+ return x264_rd_cost_subpart( h, i_lambda2, i4, i_pixel );
+
+ h->mb.i_cbp_luma = 0;
+
x264_macroblock_encode_p8x8( h, i8 );
if( i_pixel == PIXEL_16x8 )
x264_macroblock_encode_p8x8( h, i8+1 );
if( i_pixel == PIXEL_8x16 )
x264_macroblock_encode_p8x8( h, i8+2 );
- i_ssd = ssd_plane( h, i_pixel, 0, (i8&1)*8, (i8>>1)*8 )
- + ssd_plane( h, i_pixel+3, 1, (i8&1)*4, (i8>>1)*4 )
- + ssd_plane( h, i_pixel+3, 2, (i8&1)*4, (i8>>1)*4 );
+ int ssd_x = 8*(i8&1);
+ int ssd_y = 8*(i8>>1);
+ i_ssd = ssd_plane( h, i_pixel, 0, ssd_x, ssd_y );
+ int chromapix = h->luma2chroma_pixel[i_pixel];
+ int chromassd = ssd_plane( h, chromapix, 1, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT )
+ + ssd_plane( h, chromapix, 2, ssd_x>>CHROMA_H_SHIFT, ssd_y>>CHROMA_V_SHIFT );
+ i_ssd += ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
if( h->param.b_cabac )
{
- x264_cabac_t cabac_tmp = h->cabac;
- cabac_tmp.f8_bits_encoded = 0;
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
x264_partition_size_cabac( h, &cabac_tmp, i8, i_pixel );
- i_bits = ( cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
else
- {
i_bits = x264_partition_size_cavlc( h, i8, i_pixel ) * i_lambda2;
- }
- return i_ssd + i_bits;
+ return (i_ssd<<8) + i_bits;
}
-int x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode )
+static uint64_t x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode, pixel edge[4][32] )
{
- int i_ssd, i_bits;
+ uint64_t i_ssd, i_bits;
+ int plane_count = CHROMA444 ? 3 : 1;
+ int i_qp = h->mb.i_qp;
+ h->mb.i_cbp_luma &= ~(1<<i8);
+ h->mb.b_transform_8x8 = 1;
+
+ for( int p = 0; p < plane_count; p++ )
+ {
+ x264_mb_encode_i8x8( h, p, i8, i_qp, i_mode, edge[p], 1 );
+ i_qp = h->mb.i_chroma_qp;
+ }
- x264_mb_encode_i8x8( h, i8, h->mb.i_qp );
i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );
+ if( CHROMA444 )
+ {
+ int chromassd = ssd_plane( h, PIXEL_8x8, 1, (i8&1)*8, (i8>>1)*8 )
+ + ssd_plane( h, PIXEL_8x8, 2, (i8&1)*8, (i8>>1)*8 );
+ chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ i_ssd += chromassd;
+ }
if( h->param.b_cabac )
{
- x264_cabac_t cabac_tmp = h->cabac;
- cabac_tmp.f8_bits_encoded = 0;
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
x264_partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
- i_bits = ( cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
else
- {
i_bits = x264_partition_i8x8_size_cavlc( h, i8, i_mode ) * i_lambda2;
- }
- return i_ssd + i_bits;
+ return (i_ssd<<8) + i_bits;
}
-int x264_rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
+static uint64_t x264_rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
- int i_ssd, i_bits;
+ uint64_t i_ssd, i_bits;
+ int plane_count = CHROMA444 ? 3 : 1;
+ int i_qp = h->mb.i_qp;
+
+ for( int p = 0; p < plane_count; p++ )
+ {
+ x264_mb_encode_i4x4( h, p, i4, i_qp, i_mode, 1 );
+ i_qp = h->mb.i_chroma_qp;
+ }
- x264_mb_encode_i4x4( h, i4, h->mb.i_qp );
i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ if( CHROMA444 )
+ {
+ int chromassd = ssd_plane( h, PIXEL_4x4, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
+ + ssd_plane( h, PIXEL_4x4, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ i_ssd += chromassd;
+ }
if( h->param.b_cabac )
{
- x264_cabac_t cabac_tmp = h->cabac;
- cabac_tmp.f8_bits_encoded = 0;
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
x264_partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
- i_bits = ( cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
else
- {
i_bits = x264_partition_i4x4_size_cavlc( h, i4, i_mode ) * i_lambda2;
- }
- return i_ssd + i_bits;
+ return (i_ssd<<8) + i_bits;
}
-int x264_rd_cost_i8x8_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
+static uint64_t x264_rd_cost_chroma( x264_t *h, int i_lambda2, int i_mode, int b_dct )
{
- int i_ssd, i_bits;
+ uint64_t i_ssd, i_bits;
if( b_dct )
- x264_mb_encode_8x8_chroma( h, 0, h->mb.i_chroma_qp );
- i_ssd = ssd_plane( h, PIXEL_8x8, 1, 0, 0 ) +
- ssd_plane( h, PIXEL_8x8, 2, 0, 0 );
+ x264_mb_encode_chroma( h, 0, h->mb.i_chroma_qp );
+
+ int chromapix = h->luma2chroma_pixel[PIXEL_16x16];
+ i_ssd = ssd_plane( h, chromapix, 1, 0, 0 )
+ + ssd_plane( h, chromapix, 2, 0, 0 );
h->mb.i_chroma_pred_mode = i_mode;
if( h->param.b_cabac )
{
- x264_cabac_t cabac_tmp = h->cabac;
- cabac_tmp.f8_bits_encoded = 0;
- x264_i8x8_chroma_size_cabac( h, &cabac_tmp );
- i_bits = ( cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
+ x264_cabac_t cabac_tmp;
+ COPY_CABAC;
+ x264_chroma_size_cabac( h, &cabac_tmp );
+ i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
else
- {
- i_bits = x264_i8x8_chroma_size_cavlc( h ) * i_lambda2;
- }
+ i_bits = x264_chroma_size_cavlc( h ) * i_lambda2;
- return i_ssd + i_bits;
+ return (i_ssd<<8) + i_bits;
}
/****************************************************************************
* Trellis RD quantization
****************************************************************************/
-#define TRELLIS_SCORE_MAX ((uint64_t)1<<50)
+#define TRELLIS_SCORE_MAX -1LL // negative marks the node as invalid
+#define TRELLIS_SCORE_BIAS (1LL<<60) // bias so that all valid scores are positive, even after negative contributions from psy
#define CABAC_SIZE_BITS 8
-#define SSD_WEIGHT_BITS 5
#define LAMBDA_BITS 4
-/* precalculate the cost of coding abs_level_m1 */
-static int cabac_prefix_transition[15][128];
-static int cabac_prefix_size[15][128];
-void x264_rdo_init( )
+/* precalculate the cost of coding various combinations of bits in a single context */
+void x264_rdo_init( void )
{
- int i_prefix;
- int i_ctx;
- for( i_prefix = 0; i_prefix < 15; i_prefix++ )
+ for( int i_prefix = 0; i_prefix < 15; i_prefix++ )
{
- for( i_ctx = 0; i_ctx < 128; i_ctx++ )
+ for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
{
int f8_bits = 0;
uint8_t ctx = i_ctx;
- int i;
- for( i = 1; i < i_prefix; i++ )
+ for( int i = 1; i < i_prefix; i++ )
f8_bits += x264_cabac_size_decision2( &ctx, 1 );
if( i_prefix > 0 && i_prefix < 14 )
f8_bits += x264_cabac_size_decision2( &ctx, 0 );
f8_bits += 1 << CABAC_SIZE_BITS; //sign
- cabac_prefix_size[i_prefix][i_ctx] = f8_bits;
- cabac_prefix_transition[i_prefix][i_ctx] = ctx;
+ x264_cabac_size_unary[i_prefix][i_ctx] = f8_bits;
+ x264_cabac_transition_unary[i_prefix][i_ctx] = ctx;
}
}
+ for( int i_ctx = 0; i_ctx < 128; i_ctx++ )
+ {
+ int f8_bits = 0;
+ uint8_t ctx = i_ctx;
+
+ for( int i = 0; i < 5; i++ )
+ f8_bits += x264_cabac_size_decision2( &ctx, 1 );
+ f8_bits += 1 << CABAC_SIZE_BITS; //sign
+
+ cabac_size_5ones[i_ctx] = f8_bits;
+ cabac_transition_5ones[i_ctx] = ctx;
+ }
}
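+/* e.g. for prefix = X264_MIN(abs_level-1,14) = 3, x264_cabac_size_unary[3][ctx]
+ * covers the two 1s that follow the separately coded greater-than-1 bin, the
+ * terminating 0 and the bypass sign, while x264_cabac_transition_unary[3][ctx]
+ * is the context state left for the next coefficient. */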
-// node ctx: 0..3: abslevel1 (with abslevelgt1 == 0).
-// 4..7: abslevelgt1 + 3 (and abslevel1 doesn't matter).
-/* map node ctx => cabac ctx for level=1 */
-static const int coeff_abs_level1_ctx[8] = { 1, 2, 3, 4, 0, 0, 0, 0 };
-/* map node ctx => cabac ctx for level>1 */
-static const int coeff_abs_levelgt1_ctx[8] = { 5, 5, 5, 5, 6, 7, 8, 9 };
-static const int coeff_abs_level_transition[2][8] = {
-/* update node.ctx after coding a level=1 */
- { 1, 2, 3, 3, 4, 5, 6, 7 },
-/* update node.ctx after coding a level>1 */
- { 4, 4, 4, 4, 5, 6, 7, 7 }
-};
-
-static const int lambda2_tab[6] = { 1024, 1290, 1625, 2048, 2580, 3251 };
-
-typedef struct {
+typedef struct
+{
uint64_t score;
int level_idx; // index into level_tree[]
- uint8_t cabac_state[10]; //just the contexts relevant to coding abs_level_m1
+ uint8_t cabac_state[4]; // just contexts 0,4,8,9 of the 10 relevant to coding abs_level_m1
} trellis_node_t;
+typedef struct
+{
+ uint16_t next;
+ uint16_t abs_level;
+} trellis_level_t;
+
// TODO:
-// support chroma and i16x16 DC
// save cabac state between blocks?
// use trellis' RD score instead of x264_mb_decimate_score?
// code 8x8 sig/last flags forwards with deadzone and save the contexts at
//   each position?
// change weights when using CQMs?

// the unquant_mf here is not the same as dequant factors.
// The unquant here is just a direct comparison to the input coefs, scaled to be
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
-static void quant_trellis_cabac( x264_t *h, int16_t *dct,
- const uint16_t *quant_mf, const int *unquant_mf,
- const int *coef_weight, const int *zigzag,
- int i_ctxBlockCat, int i_qbits, int i_lambda2, int b_ac, int i_coefs )
+#define SIGN(x,y) ((x^(y >> 31))-(y >> 31))
+
+#define SET_LEVEL(ndst, nsrc, l) {\
+ if( sizeof(trellis_level_t) == sizeof(uint32_t) )\
+ M32( &level_tree[levels_used] ) = pack16to32( nsrc.level_idx, l );\
+ else\
+ level_tree[levels_used] = (trellis_level_t){ nsrc.level_idx, l };\
+ ndst.level_idx = levels_used;\
+ levels_used++;\
+}
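+/* level_tree is a pool of backward-linked lists: each entry stores one chosen
+ * abs_level plus the index of the previous entry on the same path, and a node
+ * keeps only its list head in level_idx. Extending a path is O(1), and the
+ * winning levels are read back by walking .next from the best node. */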
+
+// encode all values of the dc coef in a block which is known to have no ac
+static NOINLINE
+int trellis_dc_shortcut( int sign_coef, int quant_coef, int unquant_mf, int coef_weight, int lambda2, uint8_t *cabac_state, int cost_sig )
{
- int abs_coefs[64], signs[64];
- trellis_node_t nodes[2][8];
- trellis_node_t *nodes_cur = nodes[0];
- trellis_node_t *nodes_prev = nodes[1];
- trellis_node_t *bnode;
- uint8_t cabac_state_sig[64];
- uint8_t cabac_state_last[64];
- const int b_interlaced = h->mb.b_interlaced;
- const int f = 1 << 15; // no deadzone
- int i_last_nnz = -1;
- int i, j;
+ uint64_t bscore = TRELLIS_SCORE_MAX;
+ int ret = 0;
+ int q = abs( quant_coef );
+ for( int abs_level = q-1; abs_level <= q; abs_level++ )
+ {
+ int unquant_abs_level = (unquant_mf * abs_level + 128) >> 8;
- // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
- // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
- // but it takes more time to remove dead states than you gain in reduced memory.
- struct {
- uint16_t abs_level;
- uint16_t next;
- } level_tree[64*8*2];
- int i_levels_used = 1;
+ /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */
+ int d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);
+ uint64_t score = (uint64_t)d*d * coef_weight;
+
+ /* code the proposed level, and count how much entropy it would take */
+ if( abs_level )
+ {
+ unsigned f8_bits = cost_sig;
+ int prefix = X264_MIN( abs_level - 1, 14 );
+ f8_bits += x264_cabac_size_decision_noup2( cabac_state+1, prefix > 0 );
+ f8_bits += x264_cabac_size_unary[prefix][cabac_state[5]];
+ if( abs_level >= 15 )
+ f8_bits += bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS;
+ score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
+ }
+
+ COPY2_IF_LT( bscore, score, ret, abs_level );
+ }
+ return SIGN(ret, sign_coef);
+}
+
+// encode one value of one coef in one context
+static ALWAYS_INLINE
+int trellis_coef( int j, int const_level, int abs_level, int prefix, int suffix_cost,
+ int node_ctx, int level1_ctx, int levelgt1_ctx, uint64_t ssd, int cost_siglast[3],
+ trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used, int lambda2, uint8_t *level_state )
+{
+ uint64_t score = nodes_prev[j].score + ssd;
+ /* code the proposed level, and count how much entropy it would take */
+ unsigned f8_bits = cost_siglast[ j ? 1 : 2 ];
+ uint8_t level1_state = (j >= 3) ? nodes_prev[j].cabac_state[level1_ctx>>2] : level_state[level1_ctx];
+ f8_bits += x264_cabac_entropy[level1_state ^ (const_level > 1)];
+ uint8_t levelgt1_state;
+ if( const_level > 1 )
+ {
+ levelgt1_state = j >= 6 ? nodes_prev[j].cabac_state[levelgt1_ctx-6] : level_state[levelgt1_ctx];
+ f8_bits += x264_cabac_size_unary[prefix][levelgt1_state] + suffix_cost;
+ }
+ else
+ f8_bits += 1 << CABAC_SIZE_BITS;
+ score += (uint64_t)f8_bits * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
+
+ /* save the node if it's better than any existing node with the same cabac ctx */
+ if( score < nodes_cur[node_ctx].score )
+ {
+ nodes_cur[node_ctx].score = score;
+ if( j == 2 || (j <= 3 && node_ctx == 4) ) // init from input state
+ M32(nodes_cur[node_ctx].cabac_state) = M32(level_state+12);
+ else if( j >= 3 )
+ M32(nodes_cur[node_ctx].cabac_state) = M32(nodes_prev[j].cabac_state);
+ if( j >= 3 ) // skip the transition if we're not going to reuse the context
+ nodes_cur[node_ctx].cabac_state[level1_ctx>>2] = x264_cabac_transition[level1_state][const_level > 1];
+ if( const_level > 1 && node_ctx == 7 )
+ nodes_cur[node_ctx].cabac_state[levelgt1_ctx-6] = x264_cabac_transition_unary[prefix][levelgt1_state];
+ nodes_cur[node_ctx].level_idx = nodes_prev[j].level_idx;
+ SET_LEVEL( nodes_cur[node_ctx], nodes_prev[j], abs_level );
+ }
+ return levels_used;
+}
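+/* node ctx 0..3: only abs_level==1 coefs seen so far (the count picks the level1
+ * context); 4..7: some coef >1 has been seen. Each node carries just the four
+ * reusable context states (0,4,8,9 of the ten abs_level_m1 contexts); the rest
+ * are consumed at most once per path, so the shared level_state[] suffices. */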
+
+// encode one value of one coef in all contexts, templated by which value that is.
+// in ctx_lo, the set of live nodes is contiguous and starts at ctx0, so return as soon as we've seen one failure.
+// in ctx_hi, they're contiguous within each block of 4 ctxs, but not necessarily starting at the beginning,
+// so exploiting that would be more complicated.
+static NOINLINE
+int trellis_coef0_0( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used )
+{
+ nodes_cur[0].score = nodes_prev[0].score + ssd0;
+ nodes_cur[0].level_idx = nodes_prev[0].level_idx;
+ for( int j = 1; j < 4 && (int64_t)nodes_prev[j].score >= 0; j++ )
+ {
+ nodes_cur[j].score = nodes_prev[j].score;
+ if( j >= 3 )
+ M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
+ SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
+ }
+ return levels_used;
+}
+
+static NOINLINE
+int trellis_coef0_1( uint64_t ssd0, trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used )
+{
+ for( int j = 1; j < 8; j++ )
+ // this branch only affects speed, not function; there's nothing wrong with updating invalid nodes in coef0.
+ if( (int64_t)nodes_prev[j].score >= 0 )
+ {
+ nodes_cur[j].score = nodes_prev[j].score;
+ if( j >= 3 )
+ M32(nodes_cur[j].cabac_state) = M32(nodes_prev[j].cabac_state);
+ SET_LEVEL( nodes_cur[j], nodes_prev[j], 0 );
+ }
+ return levels_used;
+}
+
+#define COEF(const_level, ctx_hi, j, ...)\
+ if( !j || (int64_t)nodes_prev[j].score >= 0 )\
+ levels_used = trellis_coef( j, const_level, abs_level, prefix, suffix_cost, __VA_ARGS__,\
+ j?ssd1:ssd0, cost_siglast, nodes_cur, nodes_prev,\
+ level_tree, levels_used, lambda2, level_state );\
+ else if( !ctx_hi )\
+ return levels_used;
+
+static NOINLINE
+int trellis_coef1_0( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
+ trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used, int lambda2,
+ uint8_t *level_state )
+{
+ int abs_level = 1, prefix = 1, suffix_cost = 0;
+ COEF( 1, 0, 0, 1, 1, 0 );
+ COEF( 1, 0, 1, 2, 2, 0 );
+ COEF( 1, 0, 2, 3, 3, 0 );
+ COEF( 1, 0, 3, 3, 4, 0 );
+ return levels_used;
+}
+
+static NOINLINE
+int trellis_coef1_1( uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
+ trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used, int lambda2,
+ uint8_t *level_state )
+{
+ int abs_level = 1, prefix = 1, suffix_cost = 0;
+ COEF( 1, 1, 1, 2, 2, 0 );
+ COEF( 1, 1, 2, 3, 3, 0 );
+ COEF( 1, 1, 3, 3, 4, 0 );
+ COEF( 1, 1, 4, 4, 0, 0 );
+ COEF( 1, 1, 5, 5, 0, 0 );
+ COEF( 1, 1, 6, 6, 0, 0 );
+ COEF( 1, 1, 7, 7, 0, 0 );
+ return levels_used;
+}
+
+static NOINLINE
+int trellis_coefn_0( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
+ trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used, int lambda2,
+ uint8_t *level_state, int levelgt1_ctx )
+{
+ int prefix = X264_MIN( abs_level-1, 14 );
+ int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
+ COEF( 2, 0, 0, 4, 1, 5 );
+ COEF( 2, 0, 1, 4, 2, 5 );
+ COEF( 2, 0, 2, 4, 3, 5 );
+ COEF( 2, 0, 3, 4, 4, 5 );
+ return levels_used;
+}
- /* init coefs */
- for( i = b_ac; i < i_coefs; i++ )
+static NOINLINE
+int trellis_coefn_1( int abs_level, uint64_t ssd0, uint64_t ssd1, int cost_siglast[3],
+ trellis_node_t *nodes_cur, trellis_node_t *nodes_prev,
+ trellis_level_t *level_tree, int levels_used, int lambda2,
+ uint8_t *level_state, int levelgt1_ctx )
+{
+ int prefix = X264_MIN( abs_level-1, 14 );
+ int suffix_cost = abs_level >= 15 ? bs_size_ue_big( abs_level - 15 ) << CABAC_SIZE_BITS : 0;
+ COEF( 2, 1, 1, 4, 2, 5 );
+ COEF( 2, 1, 2, 4, 3, 5 );
+ COEF( 2, 1, 3, 4, 4, 5 );
+ COEF( 2, 1, 4, 5, 0, 6 );
+ COEF( 2, 1, 5, 6, 0, 7 );
+ COEF( 2, 1, 6, 7, 0, 8 );
+ COEF( 2, 1, 7, 7, 0, levelgt1_ctx );
+ return levels_used;
+}
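+/* The constant arguments to COEF() inline the old node-transition tables: e.g.
+ * COEF( 2, 0, 0, 4, 1, 5 ) reads "from node 0, coding a coef >1 with level1 ctx 1
+ * and levelgt1 ctx 5 lands in node ctx 4". */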
+
+static ALWAYS_INLINE
+int quant_trellis_cabac( x264_t *h, dctcoef *dct,
+ udctcoef *quant_mf, udctcoef *quant_bias, const int *unquant_mf,
+ const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
+ int b_chroma, int dc, int num_coefs, int idx )
+{
+ ALIGNED_ARRAY_N( dctcoef, orig_coefs, [64] );
+ ALIGNED_ARRAY_N( dctcoef, quant_coefs, [64] );
+ const uint32_t *coef_weight1 = num_coefs == 64 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
+ const uint32_t *coef_weight2 = num_coefs == 64 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
+ const int b_interlaced = MB_INTERLACED;
+ uint8_t *cabac_state_sig = &h->cabac.state[ x264_significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
+ uint8_t *cabac_state_last = &h->cabac.state[ x264_last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
+ int levelgt1_ctx = b_chroma && dc ? 8 : 9;
+
+ if( dc )
{
- int coef = dct[zigzag[i]];
- abs_coefs[i] = abs(coef);
- signs[i] = coef < 0 ? -1 : 1;
- if( f <= abs_coefs[i] * quant_mf[zigzag[i]] )
- i_last_nnz = i;
+ if( num_coefs == 16 )
+ {
+ memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
+ if( !h->quantf.quant_4x4_dc( dct, quant_mf[0] >> 1, quant_bias[0] << 1 ) )
+ return 0;
+ h->zigzagf.scan_4x4( quant_coefs, dct );
+ }
+ else
+ {
+ memcpy( orig_coefs, dct, sizeof(dctcoef)*num_coefs );
+ int nz = h->quantf.quant_2x2_dc( &dct[0], quant_mf[0] >> 1, quant_bias[0] << 1 );
+ if( num_coefs == 8 )
+ nz |= h->quantf.quant_2x2_dc( &dct[4], quant_mf[0] >> 1, quant_bias[0] << 1 );
+ if( !nz )
+ return 0;
+ for( int i = 0; i < num_coefs; i++ )
+ quant_coefs[i] = dct[zigzag[i]];
+ }
}
+ else
+ {
+ if( num_coefs == 64 )
+ {
+ h->mc.memcpy_aligned( orig_coefs, dct, sizeof(dctcoef)*64 );
+ if( !h->quantf.quant_8x8( dct, quant_mf, quant_bias ) )
+ return 0;
+ h->zigzagf.scan_8x8( quant_coefs, dct );
+ }
+ else //if( num_coefs == 16 )
+ {
+ memcpy( orig_coefs, dct, sizeof(dctcoef)*16 );
+ if( !h->quantf.quant_4x4( dct, quant_mf, quant_bias ) )
+ return 0;
+ h->zigzagf.scan_4x4( quant_coefs, dct );
+ }
+ }
+
+ int last_nnz = h->quantf.coeff_last[ctx_block_cat]( quant_coefs+b_ac )+b_ac;
+ uint8_t *cabac_state = &h->cabac.state[ x264_coeff_abs_level_m1_offset[ctx_block_cat] ];
- if( i_last_nnz == -1 )
+ /* shortcut for dc-only blocks.
+ * this doesn't affect the output, but saves some unnecessary computation. */
+ if( last_nnz == 0 && !dc )
{
- memset( dct, 0, i_coefs * sizeof(*dct) );
- return;
+ int cost_sig = x264_cabac_size_decision_noup2( &cabac_state_sig[0], 1 )
+ + x264_cabac_size_decision_noup2( &cabac_state_last[0], 1 );
+ dct[0] = trellis_dc_shortcut( orig_coefs[0], quant_coefs[0], unquant_mf[0], coef_weight2[0], lambda2, cabac_state, cost_sig );
+ return !!dct[0];
}
+#if HAVE_MMX && ARCH_X86_64
+#define TRELLIS_ARGS unquant_mf, zigzag, lambda2, last_nnz, orig_coefs, quant_coefs, dct,\
+ cabac_state_sig, cabac_state_last, M64(cabac_state), M16(cabac_state+8)
+ if( num_coefs == 16 && !dc )
+ if( b_chroma || !h->mb.i_psy_trellis )
+ return h->quantf.trellis_cabac_4x4( TRELLIS_ARGS, b_ac );
+ else
+ return h->quantf.trellis_cabac_4x4_psy( TRELLIS_ARGS, b_ac, h->mb.pic.fenc_dct4[idx&15], h->mb.i_psy_trellis );
+ else if( num_coefs == 64 && !dc )
+ if( b_chroma || !h->mb.i_psy_trellis )
+ return h->quantf.trellis_cabac_8x8( TRELLIS_ARGS, b_interlaced );
+ else
+ return h->quantf.trellis_cabac_8x8_psy( TRELLIS_ARGS, b_interlaced, h->mb.pic.fenc_dct8[idx&3], h->mb.i_psy_trellis);
+ else if( num_coefs == 8 && dc )
+ return h->quantf.trellis_cabac_chroma_422_dc( TRELLIS_ARGS );
+ else if( dc )
+ return h->quantf.trellis_cabac_dc( TRELLIS_ARGS, num_coefs-1 );
+#endif
+
+ // (# of coefs) * (# of ctx) * (# of levels tried) = 1024
+ // we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
+ // but it takes more time to remove dead states than you gain in reduced memory.
+ trellis_level_t level_tree[64*8*2];
+ int levels_used = 1;
/* init trellis */
- for( i = 1; i < 8; i++ )
- nodes_cur[i].score = TRELLIS_SCORE_MAX;
- nodes_cur[0].score = 0;
+ trellis_node_t nodes[2][8];
+ trellis_node_t *nodes_cur = nodes[0];
+ trellis_node_t *nodes_prev = nodes[1];
+ trellis_node_t *bnode;
+ for( int j = 1; j < 4; j++ )
+ nodes_cur[j].score = TRELLIS_SCORE_MAX;
+ nodes_cur[0].score = TRELLIS_SCORE_BIAS;
nodes_cur[0].level_idx = 0;
level_tree[0].abs_level = 0;
level_tree[0].next = 0;
+ ALIGNED_4( uint8_t level_state[16] );
+ memcpy( level_state, cabac_state, 10 );
+ level_state[12] = cabac_state[0]; // packed subset for copying into trellis_node_t
+ level_state[13] = cabac_state[4];
+ level_state[14] = cabac_state[8];
+ level_state[15] = cabac_state[9];
+
+ idx &= num_coefs == 64 ? 3 : 15;
// coefs are processed in reverse order, because that's how the abs value is coded.
// last_coef and significant_coef flags are normally coded in forward order, but
// we have to reverse them to match the levels.
// in 4x4 blocks, last_coef and significant_coef use a separate context for each
// position, so the order doesn't matter, and we don't even have to update their contexts.
// in 8x8 blocks, some positions share contexts, so we'll just have to hope that
// cabac isn't too sensitive.
+ int i = last_nnz;
+#define TRELLIS_LOOP(ctx_hi)\
+ for( ; i >= b_ac; i-- )\
+ {\
+ /* skip 0s: this doesn't affect the output, but saves some unnecessary computation. */\
+ if( !quant_coefs[i] )\
+ {\
+ /* no need to calculate ssd of 0s: it's the same in all nodes.\
+ * no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.\
+ * subtracting from one score is equivalent to adding to the rest. */\
+ if( !ctx_hi )\
+ {\
+ int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
+ b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
+ uint64_t cost_sig0 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 )\
+ * (uint64_t)lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
+ nodes_cur[0].score -= cost_sig0;\
+ }\
+ for( int j = 1; j < (ctx_hi?8:4); j++ )\
+ SET_LEVEL( nodes_cur[j], nodes_cur[j], 0 );\
+ continue;\
+ }\
+\
+ int sign_coef = orig_coefs[zigzag[i]];\
+ int abs_coef = abs( sign_coef );\
+ int q = abs( quant_coefs[i] );\
+ int cost_siglast[3]; /* { zero, nonzero, nonzero-and-last } */\
+ XCHG( trellis_node_t*, nodes_cur, nodes_prev );\
+ for( int j = ctx_hi; j < 8; j++ )\
+ nodes_cur[j].score = TRELLIS_SCORE_MAX;\
+\
+ if( i < num_coefs-1 || ctx_hi )\
+ {\
+ int sigindex = !dc && num_coefs == 64 ? x264_significant_coeff_flag_offset_8x8[b_interlaced][i] :\
+ b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
+ int lastindex = !dc && num_coefs == 64 ? x264_last_coeff_flag_offset_8x8[i] :\
+ b_chroma && dc && num_coefs == 8 ? x264_coeff_flag_offset_chroma_422_dc[i] : i;\
+ cost_siglast[0] = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 0 );\
+ int cost_sig1 = x264_cabac_size_decision_noup2( &cabac_state_sig[sigindex], 1 );\
+ cost_siglast[1] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 0 ) + cost_sig1;\
+ if( !ctx_hi )\
+ cost_siglast[2] = x264_cabac_size_decision_noup2( &cabac_state_last[lastindex], 1 ) + cost_sig1;\
+ }\
+ else\
+ {\
+ cost_siglast[0] = cost_siglast[1] = cost_siglast[2] = 0;\
+ }\
+\
+ /* there are a few cases where increasing the coeff magnitude helps,\
+ * but it's only around .003 dB, and skipping them ~doubles the speed of trellis.\
+ * could also try q-2: that sometimes helps, but also sometimes decimates blocks\
+ * that are better left coded, especially at QP > 40. */\
+ uint64_t ssd0[2], ssd1[2];\
+ for( int k = 0; k < 2; k++ )\
+ {\
+ int abs_level = q-1+k;\
+ int unquant_abs_level = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[i]]) * abs_level + 128) >> 8);\
+ int d = abs_coef - unquant_abs_level;\
+ /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */\
+ if( h->mb.i_psy_trellis && i && !dc && !b_chroma )\
+ {\
+ int orig_coef = (num_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];\
+ int predicted_coef = orig_coef - sign_coef;\
+ int psy_value = abs(unquant_abs_level + SIGN(predicted_coef, sign_coef));\
+ int psy_weight = coef_weight1[zigzag[i]] * h->mb.i_psy_trellis;\
+ ssd1[k] = (uint64_t)d*d * coef_weight2[zigzag[i]] - psy_weight * psy_value;\
+ }\
+ else\
+ /* FIXME: for i16x16 dc is this weight optimal? */\
+ ssd1[k] = (uint64_t)d*d * (dc?256:coef_weight2[zigzag[i]]);\
+ ssd0[k] = ssd1[k];\
+ if( !i && !dc && !ctx_hi )\
+ {\
+ /* Optimize rounding for DC coefficients in DC-only luma 4x4/8x8 blocks. */\
+ d = sign_coef - ((SIGN(unquant_abs_level, sign_coef) + 8)&~15);\
+ ssd0[k] = (uint64_t)d*d * coef_weight2[zigzag[i]];\
+ }\
+ }\
+\
+ /* argument passing imposes some significant overhead here. gcc's interprocedural register allocation isn't up to it. */\
+ switch( q )\
+ {\
+ case 1:\
+ ssd1[0] += (uint64_t)cost_siglast[0] * lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );\
+ levels_used = trellis_coef0_##ctx_hi( ssd0[0]-ssd1[0], nodes_cur, nodes_prev, level_tree, levels_used );\
+ levels_used = trellis_coef1_##ctx_hi( ssd0[1]-ssd1[0], ssd1[1]-ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
+ goto next##ctx_hi;\
+ case 2:\
+ levels_used = trellis_coef1_##ctx_hi( ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state );\
+ levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
+ goto next1;\
+ default:\
+ levels_used = trellis_coefn_##ctx_hi( q-1, ssd0[0], ssd1[0], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
+ levels_used = trellis_coefn_##ctx_hi( q, ssd0[1], ssd1[1], cost_siglast, nodes_cur, nodes_prev, level_tree, levels_used, lambda2, level_state, levelgt1_ctx );\
+ goto next1;\
+ }\
+ next##ctx_hi:;\
+ }\
+ /* output levels from the best path through the trellis */\
+ bnode = &nodes_cur[ctx_hi];\
+ for( int j = ctx_hi+1; j < (ctx_hi?8:4); j++ )\
+ if( nodes_cur[j].score < bnode->score )\
+ bnode = &nodes_cur[j];
+
+ // keep 2 versions of the main quantization loop, depending on which subsets of the node_ctxs are live
+ // node_ctx 0..3, i.e. having not yet encountered any coefs that might be quantized to >1
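+ // The ctx_lo loop maintains only nodes 0..3; as soon as a coef might quantize
+ // to >1 (the case 2/default "goto next1"), control moves into the ctx_hi loop,
+ // which maintains nodes 1..7 for the rest of the block.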
+ TRELLIS_LOOP(0);
- if( i_coefs == 64 )
+ if( bnode == &nodes_cur[0] )
{
- const uint8_t *ctx_sig = &h->cabac.state[ significant_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ];
- const uint8_t *ctx_last = &h->cabac.state[ last_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ];
- for( i = 0; i < 63; i++ )
- {
- cabac_state_sig[i] = ctx_sig[ significant_coeff_flag_offset_8x8[b_interlaced][i] ];
- cabac_state_last[i] = ctx_last[ last_coeff_flag_offset_8x8[i] ];
- }
+ /* We only need to zero an empty 4x4 block. 8x8 can be
+ implicitly emptied via zero nnz, as can dc. */
+ if( num_coefs == 16 && !dc )
+ memset( dct, 0, 16 * sizeof(dctcoef) );
+ return 0;
}
- else
+
+ if(0) // accessible only by goto, not fallthrough
{
- memcpy( cabac_state_sig, &h->cabac.state[ significant_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ], 15 );
- memcpy( cabac_state_last, &h->cabac.state[ last_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ], 15 );
+ // node_ctx 1..7 (ctx0 ruled out because we never try both level0 and level2+ on the same coef)
+ TRELLIS_LOOP(1);
}
- memcpy( nodes_cur[0].cabac_state, &h->cabac.state[ coeff_abs_level_m1_offset[i_ctxBlockCat] ], 10 );
- for( i = i_last_nnz; i >= b_ac; i-- )
+ int level = bnode->level_idx;
+ for( i = b_ac; i <= last_nnz; i++ )
{
- int i_coef = abs_coefs[i];
- int q = ( f + i_coef * quant_mf[zigzag[i]] ) >> 16;
- int abs_level;
- int cost_sig[2], cost_last[2];
- trellis_node_t n;
+ dct[zigzag[i]] = SIGN(level_tree[level].abs_level, dct[zigzag[i]]);
+ level = level_tree[level].next;
+ }
- // skip 0s: this doesn't affect the output, but saves some unnecessary computation.
- if( q == 0 )
+ return 1;
+}
+
+/* FIXME: This is a gigantic hack. See below.
+ *
+ * CAVLC is much more difficult to trellis than CABAC.
+ *
+ * CABAC has only three states to track: significance map, last, and the
+ * level state machine.
+ * CAVLC, by comparison, has five: coeff_token (trailing + total),
+ * total_zeroes, zero_run, and the level state machine.
+ *
+ * I know of no paper that has managed to design a close-to-optimal trellis
+ * that covers all five of these and isn't exponential-time. As a result, this
+ * "trellis" isn't: it's just a QNS search. Patches welcome for something better.
+ * It's actually surprisingly fast, albeit not quite optimal. It's pretty close
+ * though; since CAVLC only has 2^16 possible rounding modes (assuming only two
+ * roundings as options), a bruteforce search is feasible. Testing shows
+ * that this QNS is reasonably close to optimal in terms of compression.
+ *
+ * TODO:
+ * Don't bother changing large coefficients when it wouldn't affect bit cost
+ * (e.g. only affecting bypassed suffix bits).
+ * Don't re-run all parts of CAVLC bit cost calculation when not necessary.
+ * e.g. when changing a coefficient from one non-zero value to another in
+ * such a way that trailing ones and suffix length isn't affected. */
+static ALWAYS_INLINE
+int quant_trellis_cavlc( x264_t *h, dctcoef *dct,
+ const udctcoef *quant_mf, const int *unquant_mf,
+ const uint8_t *zigzag, int ctx_block_cat, int lambda2, int b_ac,
+ int b_chroma, int dc, int num_coefs, int idx, int b_8x8 )
+{
+ ALIGNED_16( dctcoef quant_coefs[2][16] );
+ ALIGNED_16( dctcoef coefs[16] ) = {0};
+ const uint32_t *coef_weight1 = b_8x8 ? x264_dct8_weight_tab : x264_dct4_weight_tab;
+ const uint32_t *coef_weight2 = b_8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;
+ int delta_distortion[16];
+ int64_t score = 1ULL<<62;
+ int i, j;
+ const int f = 1<<15;
+ int nC = b_chroma && dc ? 3 + (num_coefs>>2)
+ : ct_index[x264_mb_predict_non_zero_code( h, !b_chroma && dc ? (idx - LUMA_DC)*16 : idx )];
+
+ /* Code for handling 8x8dct -> 4x4dct CAVLC munging. Input/output use a different
+ * step/start/end than internal processing. */
+ int step = 1;
+ int start = b_ac;
+ int end = num_coefs - 1;
+ if( b_8x8 )
+ {
+ start = idx&3;
+ end = 60 + start;
+ step = 4;
+ }
+ idx &= 15;
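+ /* CAVLC codes an 8x8 block as four interleaved 4x4 blocks: subblock idx&3 owns
+ * every 4th position of the 8x8 zigzag, hence the step/start/end set above. */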
+
+ lambda2 <<= LAMBDA_BITS;
+
+ /* Find last non-zero coefficient. */
+ for( i = end; i >= start; i -= step )
+ if( (unsigned)(dct[zigzag[i]] * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) + f-1) >= 2*f )
+ break;
+
+ if( i < start )
+ goto zeroblock;
+
+ /* Prepare for QNS search: calculate distortion caused by each DCT coefficient
+ * rounding to be searched.
+ *
+ * We only search two roundings (nearest and nearest-1) like in CABAC trellis,
+ * so we just store the difference in distortion between them. */
+ int last_nnz = b_8x8 ? i >> 2 : i;
+ int coef_mask = 0;
+ int round_mask = 0;
+ for( i = b_ac, j = start; i <= last_nnz; i++, j += step )
+ {
+ int coef = dct[zigzag[j]];
+ int abs_coef = abs(coef);
+ int sign = coef < 0 ? -1 : 1;
+ int nearest_quant = ( f + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
+ quant_coefs[1][i] = quant_coefs[0][i] = sign * nearest_quant;
+ coefs[i] = quant_coefs[1][i];
+ if( nearest_quant )
{
- // no need to calculate ssd of 0s: it's the same in all nodes.
- // no need to modify level_tree for ctx=0: it starts with an infinite loop of 0s.
- const int cost_sig0 = x264_cabac_size_decision_noup( &cabac_state_sig[i], 0 )
- * i_lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
- for( j = 1; j < 8; j++ )
+ /* We initialize the trellis with a deadzone halfway between nearest rounding
+ * and always-round-down. This gives much better results than initializing to either
+ * extreme.
+ * FIXME: should we initialize to the deadzones used by deadzone quant? */
+ int deadzone_quant = ( f/2 + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
+ int unquant1 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-0) + 128) >> 8);
+ int unquant0 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-1) + 128) >> 8);
+ int d1 = abs_coef - unquant1;
+ int d0 = abs_coef - unquant0;
+ delta_distortion[i] = (d0*d0 - d1*d1) * (dc?256:coef_weight2[zigzag[j]]);
+
+ /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
+ if( h->mb.i_psy_trellis && j && !dc && !b_chroma )
{
- if( nodes_cur[j].score != TRELLIS_SCORE_MAX )
- {
-#define SET_LEVEL(n,l) \
- level_tree[i_levels_used].abs_level = l; \
- level_tree[i_levels_used].next = n.level_idx; \
- n.level_idx = i_levels_used; \
- i_levels_used++;
-
- SET_LEVEL( nodes_cur[j], 0 );
- nodes_cur[j].score += cost_sig0;
- }
+ int orig_coef = b_8x8 ? h->mb.pic.fenc_dct8[idx>>2][zigzag[j]] : h->mb.pic.fenc_dct4[idx][zigzag[j]];
+ int predicted_coef = orig_coef - coef;
+ int psy_weight = coef_weight1[zigzag[j]];
+ int psy_value0 = h->mb.i_psy_trellis * abs(predicted_coef + unquant0 * sign);
+ int psy_value1 = h->mb.i_psy_trellis * abs(predicted_coef + unquant1 * sign);
+ delta_distortion[i] += (psy_value0 - psy_value1) * psy_weight;
}
- continue;
- }
-
- XCHG( trellis_node_t*, nodes_cur, nodes_prev );
-
- for( j = 0; j < 8; j++ )
- nodes_cur[j].score = TRELLIS_SCORE_MAX;
- if( i < i_coefs-1 )
- {
- cost_sig[0] = x264_cabac_size_decision_noup( &cabac_state_sig[i], 0 );
- cost_sig[1] = x264_cabac_size_decision_noup( &cabac_state_sig[i], 1 );
- cost_last[0] = x264_cabac_size_decision_noup( &cabac_state_last[i], 0 );
- cost_last[1] = x264_cabac_size_decision_noup( &cabac_state_last[i], 1 );
+ quant_coefs[0][i] = sign * (nearest_quant-1);
+ if( deadzone_quant != nearest_quant )
+ coefs[i] = quant_coefs[0][i];
+ else
+ round_mask |= 1 << i;
}
else
- {
- cost_sig[0] = cost_sig[1] = 0;
- cost_last[0] = cost_last[1] = 0;
- }
+ delta_distortion[i] = 0;
+ coef_mask |= (!!coefs[i]) << i;
+ }
- // there are a few cases where increasing the coeff magnitude helps,
- // but it's only around .003 dB, and skipping them ~doubles the speed of trellis.
- // could also try q-2: that sometimes helps, but also sometimes decimates blocks
- // that are better left coded, especially at QP > 40.
- for( abs_level = q; abs_level >= q-1; abs_level-- )
- {
- int d = i_coef - ((unquant_mf[zigzag[i]] * abs_level + 128) >> 8);
- uint64_t ssd = (int64_t)d*d * coef_weight[i];
+ /* Calculate the cost of the starting state. */
+ h->out.bs.i_bits_encoded = 0;
+ if( !coef_mask )
+ bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
+ else
+ x264_cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
+ score = (int64_t)h->out.bs.i_bits_encoded * lambda2;
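+ /* This is the QNS baseline: the rate of the deadzone-initialized rounding in the
+ * same bits*lambda2 fixed point as the per-change scores below. Distortion enters
+ * only as deltas, so no absolute distortion baseline is needed. */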
- for( j = 0; j < 8; j++ )
+ /* QNS loop: pick the change that improves RD the most, apply it, repeat.
+ * coef_mask and round_mask are used to simplify tracking of nonzeroness
+ * and rounding modes chosen. */
+ while( 1 )
+ {
+ int64_t iter_score = score;
+ int iter_distortion_delta = 0;
+ int iter_coef = -1;
+ int iter_mask = coef_mask;
+ int iter_round = round_mask;
+ for( i = b_ac; i <= last_nnz; i++ )
+ {
+ if( !delta_distortion[i] )
+ continue;
+
+ /* Set up all the variables for this iteration. */
+ int cur_round = round_mask ^ (1 << i);
+ int round_change = (cur_round >> i)&1;
+ int old_coef = coefs[i];
+ int new_coef = quant_coefs[round_change][i];
+ int cur_mask = (coef_mask&~(1 << i))|(!!new_coef << i);
+ int cur_distortion_delta = delta_distortion[i] * (round_change ? -1 : 1);
+ int64_t cur_score = cur_distortion_delta;
+ coefs[i] = new_coef;
+
+ /* Count up bits. */
+ h->out.bs.i_bits_encoded = 0;
+ if( !cur_mask )
+ bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
+ else
+ x264_cavlc_block_residual_internal( h, ctx_block_cat, coefs + b_ac, nC );
+ cur_score += (int64_t)h->out.bs.i_bits_encoded * lambda2;
+
+ coefs[i] = old_coef;
+ if( cur_score < iter_score )
{
- int node_ctx = j;
- if( nodes_prev[j].score == TRELLIS_SCORE_MAX )
- continue;
- n = nodes_prev[j];
-
- /* code the proposed level, and count how much entropy it would take */
- if( abs_level || node_ctx )
- {
- unsigned f8_bits = cost_sig[ abs_level != 0 ];
- if( abs_level )
- {
- const int i_prefix = X264_MIN( abs_level - 1, 14 );
- f8_bits += cost_last[ node_ctx == 0 ];
- f8_bits += x264_cabac_size_decision2( &n.cabac_state[coeff_abs_level1_ctx[node_ctx]], i_prefix > 0 );
- if( i_prefix > 0 )
- {
- uint8_t *ctx = &n.cabac_state[coeff_abs_levelgt1_ctx[node_ctx]];
- f8_bits += cabac_prefix_size[i_prefix][*ctx];
- *ctx = cabac_prefix_transition[i_prefix][*ctx];
- if( abs_level >= 15 )
- f8_bits += bs_size_ue( abs_level - 15 ) << CABAC_SIZE_BITS;
- node_ctx = coeff_abs_level_transition[1][node_ctx];
- }
- else
- {
- f8_bits += 1 << CABAC_SIZE_BITS;
- node_ctx = coeff_abs_level_transition[0][node_ctx];
- }
- }
- n.score += (uint64_t)f8_bits * i_lambda2 >> ( CABAC_SIZE_BITS - LAMBDA_BITS );
- }
-
- n.score += ssd;
-
- /* save the node if it's better than any existing node with the same cabac ctx */
- if( n.score < nodes_cur[node_ctx].score )
- {
- SET_LEVEL( n, abs_level );
- nodes_cur[node_ctx] = n;
- }
+ iter_score = cur_score;
+ iter_coef = i;
+ iter_mask = cur_mask;
+ iter_round = cur_round;
+ iter_distortion_delta = cur_distortion_delta;
}
}
+ if( iter_coef >= 0 )
+ {
+ score = iter_score - iter_distortion_delta;
+ coef_mask = iter_mask;
+ round_mask = iter_round;
+ coefs[iter_coef] = quant_coefs[((round_mask >> iter_coef)&1)][iter_coef];
+ /* Don't try adjusting coefficients we've already adjusted.
+ * Testing suggests this doesn't hurt results -- and sometimes actually helps. */
+ delta_distortion[iter_coef] = 0;
+ }
+ else
+ break;
}
- /* output levels from the best path through the trellis */
- bnode = &nodes_cur[0];
- for( j = 1; j < 8; j++ )
- if( nodes_cur[j].score < bnode->score )
- bnode = &nodes_cur[j];
+ if( coef_mask )
+ {
+ for( i = b_ac, j = start; i < num_coefs; i++, j += step )
+ dct[zigzag[j]] = coefs[i];
+ return 1;
+ }
- j = bnode->level_idx;
- for( i = b_ac; i < i_coefs; i++ )
+zeroblock:
+ if( !dc )
{
- dct[zigzag[i]] = level_tree[j].abs_level * signs[i];
- j = level_tree[j].next;
+ if( b_8x8 )
+ for( i = start; i <= end; i+=step )
+ dct[zigzag[i]] = 0;
+ else
+ memset( dct, 0, 16*sizeof(dctcoef) );
}
+ return 0;
}
-
-void x264_quant_4x4_trellis( x264_t *h, int16_t dct[4][4], int i_quant_cat,
- int i_qp, int i_ctxBlockCat, int b_intra )
+int x264_quant_luma_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat, int i_qp, int ctx_block_cat, int b_intra, int idx )
{
- const int i_qbits = i_qp / 6;
- const int i_mf = i_qp % 6;
- const int b_ac = (i_ctxBlockCat == DCT_LUMA_AC);
- /* should the lambdas be different? I'm just matching the behaviour of deadzone quant. */
- const int i_lambda_mult = b_intra ? 65 : 85;
- const int i_lambda2 = ((lambda2_tab[i_mf] * i_lambda_mult*i_lambda_mult / 10000)
- << (2*i_qbits)) >> LAMBDA_BITS;
-
- quant_trellis_cabac( h, (int16_t*)dct,
- h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- x264_dct4_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan4[h->mb.b_interlaced],
- i_ctxBlockCat, 15+i_qbits, i_lambda2, b_ac, 16 );
+ if( h->param.b_cabac )
+ return quant_trellis_cabac( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
+ h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx );
+
+ return quant_trellis_cavlc( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
+ DCT_LUMA_DC, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 1, 16, idx, 0 );
}
+static const uint8_t x264_zigzag_scan2x2[4] = { 0, 1, 2, 3 };
+static const uint8_t x264_zigzag_scan2x4[8] = { 0, 2, 1, 4, 6, 3, 5, 7 };
-void x264_quant_8x8_trellis( x264_t *h, int16_t dct[8][8], int i_quant_cat,
- int i_qp, int b_intra )
+int x264_quant_chroma_dc_trellis( x264_t *h, dctcoef *dct, int i_qp, int b_intra, int idx )
{
- const int i_qbits = i_qp / 6;
- const int i_mf = i_qp % 6;
- const int i_lambda_mult = b_intra ? 65 : 85;
- const int i_lambda2 = ((lambda2_tab[i_mf] * i_lambda_mult*i_lambda_mult / 10000)
- << (2*i_qbits)) >> LAMBDA_BITS;
-
- quant_trellis_cabac( h, (int16_t*)dct,
- h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
- x264_dct8_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan8[h->mb.b_interlaced],
- DCT_LUMA_8x8, 16+i_qbits, i_lambda2, 0, 64 );
+ const uint8_t *zigzag;
+ int num_coefs;
+ int quant_cat = CQM_4IC+1 - b_intra;
+
+ if( CHROMA_FORMAT == CHROMA_422 )
+ {
+ zigzag = x264_zigzag_scan2x4;
+ num_coefs = 8;
+ }
+ else
+ {
+ zigzag = x264_zigzag_scan2x2;
+ num_coefs = 4;
+ }
+
+ if( h->param.b_cabac )
+ return quant_trellis_cabac( h, dct,
+ h->quant4_mf[quant_cat][i_qp], h->quant4_bias0[quant_cat][i_qp],
+ h->unquant4_mf[quant_cat][i_qp], zigzag,
+ DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx );
+
+ return quant_trellis_cavlc( h, dct,
+ h->quant4_mf[quant_cat][i_qp], h->unquant4_mf[quant_cat][i_qp], zigzag,
+ DCT_CHROMA_DC, h->mb.i_trellis_lambda2[1][b_intra], 0, 1, 1, num_coefs, idx, 0 );
}
+int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
+ int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
+{
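+ /* 1 for the four AC block categories (DCT_LUMA_AC, DCT_CHROMA_AC, DCT_CHROMAU_AC,
+ * DCT_CHROMAV_AC), whose scans skip the DC position, i.e. start at coef 1. */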
+ static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};
+ int b_ac = ctx_ac[ctx_block_cat];
+ if( h->param.b_cabac )
+ return quant_trellis_cabac( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias0[i_quant_cat][i_qp],
+ h->unquant4_mf[i_quant_cat][i_qp], x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx );
+
+ return quant_trellis_cavlc( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
+ x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx, 0 );
+}
+
+int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
+ int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
+{
+ if( h->param.b_cabac )
+ {
+ return quant_trellis_cabac( h, dct,
+ h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias0[i_quant_cat][i_qp],
+ h->unquant8_mf[i_quant_cat][i_qp], x264_zigzag_scan8[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 64, idx );
+ }
+
+ /* 8x8 CAVLC is split into 4 4x4 blocks */
+ int nzaccum = 0;
+ for( int i = 0; i < 4; i++ )
+ {
+ int nz = quant_trellis_cavlc( h, dct,
+ h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
+ x264_zigzag_scan8[MB_INTERLACED],
+ DCT_LUMA_4x4, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 16, idx*4+i, 1 );
+ /* Set up nonzero count for future calls */
+ h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
+ nzaccum |= nz;
+ }
+ STORE_8x8_NNZ( 0, idx, 0 );
+ return nzaccum;
+}