/*****************************************************************************
- * rdo.c: h264 encoder library (rate-distortion optimization)
+ * rdo.c: rate-distortion optimization
*****************************************************************************
- * Copyright (C) 2005-2008 x264 project
+ * Copyright (C) 2005-2011 x264 project
*
* Authors: Loren Merritt <lorenm@u.washington.edu>
* Fiona Glaser <fiona@x264.com>
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
+ *
+ * This program is also available under a commercial proprietary license.
+ * For more information, contact us at licensing@x264.com.
*****************************************************************************/
/* duplicate all the writer functions, just calculating bit cost
 * instead of writing the bitstream.
 * CABAC bit costs are approximate: x264_cabac_size_decision() keeps track of
 * fractional bits, but only with finite precision. */
#undef x264_cabac_encode_decision
#undef x264_cabac_encode_decision_noup
+#undef x264_cabac_encode_bypass
+#undef x264_cabac_encode_terminal
#define x264_cabac_encode_decision(c,x,v) x264_cabac_size_decision(c,x,v)
#define x264_cabac_encode_decision_noup(c,x,v) x264_cabac_size_decision_noup(c,x,v)
#define x264_cabac_encode_terminal(c) ((c)->f8_bits_encoded += 7)
+#define x264_cabac_encode_bypass(c,v) ((c)->f8_bits_encoded += 256) /* f8_bits_encoded is in 1/256-bit units */
#include "cabac.c"
#define COPY_CABAC h->mc.memcpy_aligned( &cabac_tmp.f8_bits_encoded, &h->cabac.f8_bits_encoded, \
- sizeof(x264_cabac_t) - offsetof(x264_cabac_t,f8_bits_encoded) )
+ sizeof(x264_cabac_t) - offsetof(x264_cabac_t,f8_bits_encoded) - (CHROMA444 ? 0 : (1024+12)-460) )
#define COPY_CABAC_PART( pos, size )\
memcpy( &cb->state[pos], &h->cabac.state[pos], size )
-static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int pixel, int x, int y )
+static ALWAYS_INLINE uint64_t cached_hadamard( x264_t *h, int size, int x, int y )
{
static const uint8_t hadamard_shift_x[4] = {4, 4, 3, 3};
static const uint8_t hadamard_shift_y[4] = {4-0, 3-0, 4-1, 3-1};
static const uint8_t hadamard_offset[4] = {0, 1, 3, 5};
- int cache_index = (x >> hadamard_shift_x[pixel]) + (y >> hadamard_shift_y[pixel])
- + hadamard_offset[pixel];
+ int cache_index = (x >> hadamard_shift_x[size]) + (y >> hadamard_shift_y[size])
+ + hadamard_offset[size];
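+ /* Cache layout: one slot for 16x16, two each for 16x8/8x16, four for 8x8
+ * (hence offsets {0,1,3,5}). Values are stored as res+1 so that a zero
+ * entry can mean "not cached yet". */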
uint64_t res = h->mb.pic.fenc_hadamard_cache[cache_index];
if( res )
return res - 1;
else
{
- uint8_t *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
- res = h->pixf.hadamard_ac[pixel]( fenc, FENC_STRIDE );
+ pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
+ res = h->pixf.hadamard_ac[size]( fenc, FENC_STRIDE );
h->mb.pic.fenc_hadamard_cache[cache_index] = res + 1;
return res;
}
}
-static ALWAYS_INLINE int cached_satd( x264_t *h, int pixel, int x, int y )
+static ALWAYS_INLINE int cached_satd( x264_t *h, int size, int x, int y )
{
static const uint8_t satd_shift_x[3] = {3, 2, 2};
static const uint8_t satd_shift_y[3] = {2-1, 3-2, 2-2};
static const uint8_t satd_offset[3] = {0, 8, 16};
- ALIGNED_16( static uint8_t zero[16] );
- int cache_index = (x >> satd_shift_x[pixel - PIXEL_8x4]) + (y >> satd_shift_y[pixel - PIXEL_8x4])
- + satd_offset[pixel - PIXEL_8x4];
+ ALIGNED_16( static pixel zero[16] ) = {0};
+ int cache_index = (x >> satd_shift_x[size - PIXEL_8x4]) + (y >> satd_shift_y[size - PIXEL_8x4])
+ + satd_offset[size - PIXEL_8x4];
int res = h->mb.pic.fenc_satd_cache[cache_index];
if( res )
return res - 1;
else
{
- uint8_t *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
- int dc = h->pixf.sad[pixel]( fenc, FENC_STRIDE, zero, 0 ) >> 1;
- res = h->pixf.satd[pixel]( fenc, FENC_STRIDE, zero, 0 ) - dc;
+ pixel *fenc = h->mb.pic.p_fenc[0] + x + y*FENC_STRIDE;
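+ /* SATD against an all-zero block includes the DC term (the Hadamard DC
+ * coefficient is the pixel sum, counted as sum/2 by satd's convention);
+ * subtracting SAD>>1 leaves an AC-only energy measure for psy-RD. */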
+ int dc = h->pixf.sad[size]( fenc, FENC_STRIDE, zero, 0 ) >> 1;
+ res = h->pixf.satd[size]( fenc, FENC_STRIDE, zero, 0 ) - dc;
h->mb.pic.fenc_satd_cache[cache_index] = res + 1;
return res;
}
}
static inline int ssd_plane( x264_t *h, int size, int p, int x, int y )
{
- ALIGNED_16(static uint8_t zero[16]);
+ ALIGNED_16( static pixel zero[16] ) = {0};
int satd = 0;
- uint8_t *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
- uint8_t *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
+ pixel *fdec = h->mb.pic.p_fdec[p] + x + y*FDEC_STRIDE;
+ pixel *fenc = h->mb.pic.p_fenc[p] + x + y*FENC_STRIDE;
if( p == 0 && h->mb.i_psy_rd )
{
/* If the plane is smaller than 8x8, we can't do an SA8D; this probably isn't a big problem. */
static inline int ssd_mb( x264_t *h )
{
- int chromassd = ssd_plane(h, PIXEL_8x8, 1, 0, 0) + ssd_plane(h, PIXEL_8x8, 2, 0, 0);
- chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
- return ssd_plane(h, PIXEL_16x16, 0, 0, 0) + chromassd;
+ int chroma_size = CHROMA444 ? PIXEL_16x16 : PIXEL_8x8;
+ int chroma_ssd = ssd_plane(h, chroma_size, 1, 0, 0) + ssd_plane(h, chroma_size, 2, 0, 0);
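+ /* i_chroma_lambda2_offset is a Q8 fixed-point weight (256 == 1.0) that
+ * compensates for chroma using a different lambda than luma; e.g. a weight
+ * of 307 (~1.2) maps an SSD of 1000 to (1000*307+128)>>8 = 1199. */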
+ chroma_ssd = ((uint64_t)chroma_ssd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ return ssd_plane(h, PIXEL_16x16, 0, 0, 0) + chroma_ssd;
}
static int x264_rd_cost_mb( x264_t *h, int i_lambda2 )
x264_macroblock_encode( h );
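+ /* With deblock-RDO, run the in-loop filter on the reconstruction first so
+ * that the SSD below measures the deblocked output. */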
+ if( h->mb.b_deblock_rdo )
+ x264_macroblock_deblock( h );
+
i_ssd = ssd_mb( h );
if( IS_SKIP( h->mb.i_type ) )
return i_ssd + i_bits;
}
-/* For small partitions (i.e. those using at most one DCT category's worth of CABAC states),
- * it's faster to copy the individual parts than to perform a whole CABAC_COPY. */
-static ALWAYS_INLINE void x264_copy_cabac_part( x264_t *h, x264_cabac_t *cb, int cat, int intra )
-{
- if( intra )
- COPY_CABAC_PART( 68, 2 ); //intra pred mode
- else
- COPY_CABAC_PART( 40, 16 ); //mvd, rounded up to 16 bytes
-
- /* 8x8dct writes CBP, while non-8x8dct writes CBF */
- if( cat != DCT_LUMA_8x8 )
- COPY_CABAC_PART( 85 + cat * 4, 4 );
- else
- COPY_CABAC_PART( 73, 4 );
-
- /* Really should be 15 bytes, but rounding up a byte saves some
- * instructions and is faster, and copying extra data doesn't hurt. */
- COPY_CABAC_PART( significant_coeff_flag_offset[h->mb.b_interlaced][cat], 16 );
- COPY_CABAC_PART( last_coeff_flag_offset[h->mb.b_interlaced][cat], 16 );
- COPY_CABAC_PART( coeff_abs_level_m1_offset[cat], 10 );
- cb->f8_bits_encoded = 0;
-}
-
/* partition RD functions use 8 bits more precision to avoid large rounding errors at low QPs */
static uint64_t x264_rd_cost_subpart( x264_t *h, int i_lambda2, int i4, int i_pixel )
x264_macroblock_encode_p4x4( h, i4+2 );
i_ssd = ssd_plane( h, i_pixel, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ if( CHROMA444 )
+ {
+ int chromassd = ssd_plane( h, i_pixel, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
+ + ssd_plane( h, i_pixel, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ i_ssd += chromassd;
+ }
if( h->param.b_cabac )
{
x264_cabac_t cabac_tmp;
- x264_copy_cabac_part( h, &cabac_tmp, DCT_LUMA_4x4, 0 );
+ COPY_CABAC;
x264_subpartition_size_cabac( h, &cabac_tmp, i4, i_pixel );
i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
if( i_pixel == PIXEL_8x16 )
x264_macroblock_encode_p8x8( h, i8+2 );
- chromassd = ssd_plane( h, i_pixel+3, 1, (i8&1)*4, (i8>>1)*4 )
- + ssd_plane( h, i_pixel+3, 2, (i8&1)*4, (i8>>1)*4 );
+ i_ssd = ssd_plane( h, i_pixel, 0, (i8&1)*8, (i8>>1)*8 );
+ if( CHROMA444 )
+ {
+ chromassd = ssd_plane( h, i_pixel, 1, (i8&1)*8, (i8>>1)*8 )
+ + ssd_plane( h, i_pixel, 2, (i8&1)*8, (i8>>1)*8 );
+ }
+ else
+ {
+ chromassd = ssd_plane( h, i_pixel+3, 1, (i8&1)*4, (i8>>1)*4 )
+ + ssd_plane( h, i_pixel+3, 2, (i8&1)*4, (i8>>1)*4 );
+ }
chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
- i_ssd = ssd_plane( h, i_pixel, 0, (i8&1)*8, (i8>>1)*8 ) + chromassd;
+ i_ssd += chromassd;
if( h->param.b_cabac )
{
return (i_ssd<<8) + i_bits;
}
-static uint64_t x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode )
+static uint64_t x264_rd_cost_i8x8( x264_t *h, int i_lambda2, int i8, int i_mode, pixel edge[3][48] )
{
uint64_t i_ssd, i_bits;
+ int plane_count = CHROMA444 ? 3 : 1;
+ int i_qp = h->mb.i_qp;
h->mb.i_cbp_luma &= ~(1<<i8);
h->mb.b_transform_8x8 = 1;
- x264_mb_encode_i8x8( h, i8, h->mb.i_qp );
+ for( int p = 0; p < plane_count; p++ )
+ {
+ x264_mb_encode_i8x8( h, p, i8, i_qp, i_mode, edge[p] );
+ i_qp = h->mb.i_chroma_qp;
+ }
+
i_ssd = ssd_plane( h, PIXEL_8x8, 0, (i8&1)*8, (i8>>1)*8 );
+ if( CHROMA444 )
+ {
+ int chromassd = ssd_plane( h, PIXEL_8x8, 1, (i8&1)*8, (i8>>1)*8 )
+ + ssd_plane( h, PIXEL_8x8, 2, (i8&1)*8, (i8>>1)*8 );
+ chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ i_ssd += chromassd;
+ }
if( h->param.b_cabac )
{
x264_cabac_t cabac_tmp;
- x264_copy_cabac_part( h, &cabac_tmp, DCT_LUMA_8x8, 1 );
+ COPY_CABAC;
x264_partition_i8x8_size_cabac( h, &cabac_tmp, i8, i_mode );
i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
static uint64_t x264_rd_cost_i4x4( x264_t *h, int i_lambda2, int i4, int i_mode )
{
uint64_t i_ssd, i_bits;
+ int plane_count = CHROMA444 ? 3 : 1;
+ int i_qp = h->mb.i_qp;
+
+ for( int p = 0; p < plane_count; p++ )
+ {
+ x264_mb_encode_i4x4( h, p, i4, i_qp, i_mode );
+ i_qp = h->mb.i_chroma_qp;
+ }
- x264_mb_encode_i4x4( h, i4, h->mb.i_qp );
i_ssd = ssd_plane( h, PIXEL_4x4, 0, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ if( CHROMA444 )
+ {
+ int chromassd = ssd_plane( h, PIXEL_4x4, 1, block_idx_x[i4]*4, block_idx_y[i4]*4 )
+ + ssd_plane( h, PIXEL_4x4, 2, block_idx_x[i4]*4, block_idx_y[i4]*4 );
+ chromassd = ((uint64_t)chromassd * h->mb.i_chroma_lambda2_offset + 128) >> 8;
+ i_ssd += chromassd;
+ }
if( h->param.b_cabac )
{
x264_cabac_t cabac_tmp;
- x264_copy_cabac_part( h, &cabac_tmp, DCT_LUMA_4x4, 1 );
+ COPY_CABAC;
x264_partition_i4x4_size_cabac( h, &cabac_tmp, i4, i_mode );
i_bits = ( (uint64_t)cabac_tmp.f8_bits_encoded * i_lambda2 + 128 ) >> 8;
}
}
}
-typedef struct {
+typedef struct
+{
int64_t score;
int level_idx; // index into level_tree[]
uint8_t cabac_state[10]; //just the contexts relevant to coding abs_level_m1
// comparable to the input. so unquant is the direct inverse of quant,
// and uses the dct scaling factors, not the idct ones.
-static ALWAYS_INLINE int quant_trellis_cabac( x264_t *h, int16_t *dct,
- const uint16_t *quant_mf, const int *unquant_mf,
- const int *coef_weight, const uint8_t *zigzag,
- int i_ctxBlockCat, int i_lambda2, int b_ac, int dc, int i_coefs, int idx )
+static ALWAYS_INLINE
+int quant_trellis_cabac( x264_t *h, dctcoef *dct,
+ const udctcoef *quant_mf, const int *unquant_mf,
+ const int *coef_weight, const uint8_t *zigzag,
+ int ctx_block_cat, int i_lambda2, int b_ac,
+ int b_chroma, int dc, int i_coefs, int idx )
{
int abs_coefs[64], signs[64];
trellis_node_t nodes[2][8];
trellis_node_t *nodes_cur = nodes[0];
trellis_node_t *nodes_prev = nodes[1];
trellis_node_t *bnode;
- const int b_interlaced = h->mb.b_interlaced;
- uint8_t *cabac_state_sig = &h->cabac.state[ significant_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ];
- uint8_t *cabac_state_last = &h->cabac.state[ last_coeff_flag_offset[b_interlaced][i_ctxBlockCat] ];
+ const int b_interlaced = MB_INTERLACED;
+ uint8_t *cabac_state_sig = &h->cabac.state[ significant_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
+ uint8_t *cabac_state_last = &h->cabac.state[ last_coeff_flag_offset[b_interlaced][ctx_block_cat] ];
const int f = 1 << 15; // no deadzone
int i_last_nnz;
int i;
// (# of coefs) * (# of ctx) * (# of levels tried) = 1024
// we don't need to keep all of those: (# of coefs) * (# of ctx) would be enough,
// but it takes more time to remove dead states than you gain in reduced memory.
- struct {
+ struct
+ {
uint16_t abs_level;
uint16_t next;
} level_tree[64*8*2];
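/* level_tree is a pool of singly-linked lists: each entry holds one quantized
 * level and the index of the next entry, so trellis paths can share common
 * tails instead of each node storing a full coefficient vector. */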
if( i < b_ac )
{
- /* We only need to memset an empty 4x4 block. 8x8 can be
+ /* We only need to zero an empty 4x4 block. 8x8 can be
implicitly emptied via zero nnz, as can dc. */
if( i_coefs == 16 && !dc )
- memset( dct, 0, 16 * sizeof(int16_t) );
+ memset( dct, 0, 16 * sizeof(dctcoef) );
return 0;
}
i_last_nnz = i;
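+ /* In 4:4:4, idx carries a plane offset; mask it down to the per-plane block
+ * index used to address the psy DCT caches below. */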
+ idx &= i_coefs == 64 ? 3 : 15;
for( ; i >= b_ac; i-- )
{
// in 8x8 blocks, some positions share contexts, so we'll just have to hope that
// cabac isn't too sensitive.
- memcpy( nodes_cur[0].cabac_state, &h->cabac.state[ coeff_abs_level_m1_offset[i_ctxBlockCat] ], 10 );
+ memcpy( nodes_cur[0].cabac_state, &h->cabac.state[ coeff_abs_level_m1_offset[ctx_block_cat] ], 10 );
for( i = i_last_nnz; i >= b_ac; i-- )
{
int d = i_coef - unquant_abs_level;
int64_t ssd;
/* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
- if( h->mb.i_psy_trellis && i && !dc && i_ctxBlockCat != DCT_CHROMA_AC )
+ if( h->mb.i_psy_trellis && i && !dc && !b_chroma )
{
int orig_coef = (i_coefs == 64) ? h->mb.pic.fenc_dct8[idx][zigzag[i]] : h->mb.pic.fenc_dct4[idx][zigzag[i]];
int predicted_coef = orig_coef - i_coef * signs[i];
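/* predicted_coef isolates the prediction's contribution to this coefficient
 * in the transform domain: the source DCT coefficient minus the (signed)
 * residual coefficient. */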
if( bnode == &nodes_cur[0] )
{
if( i_coefs == 16 && !dc )
- memset( dct, 0, 16 * sizeof(int16_t) );
+ memset( dct, 0, 16 * sizeof(dctcoef) );
return 0;
}
return 1;
}
+/* FIXME: This is a gigantic hack. See below.
+ *
+ * CAVLC is much more difficult to trellis than CABAC.
+ *
+ * CABAC has only three states to track: significance map, last, and the
+ * level state machine.
+ * CAVLC, by comparison, has five: coeff_token (trailing + total),
+ * total_zeroes, zero_run, and the level state machine.
+ *
+ * I know of no paper that has managed to design a close-to-optimal trellis
+ * that covers all five of these and isn't exponential-time. As a result, this
+ * "trellis" isn't: it's just a QNS search. Patches welcome for something better.
+ * It's actually surprisingly fast, albeit not quite optimal. It's pretty close
+ * though; since CAVLC only has 2^16 possible rounding modes (assuming only two
+ * roundings as options), a bruteforce search is feasible. Testing shows
+ * that this QNS is reasonably close to optimal in terms of compression.
+ *
+ * TODO:
+ * Don't bother changing large coefficients when it wouldn't affect bit cost
+ * (e.g. only affecting bypassed suffix bits).
+ * Don't re-run all parts of CAVLC bit cost calculation when not necessary.
+ * e.g. when changing a coefficient from one non-zero value to another in
+ * such a way that trailing ones and suffix length isn't affected. */
+static ALWAYS_INLINE
+int quant_trellis_cavlc( x264_t *h, dctcoef *dct,
+ const udctcoef *quant_mf, const int *unquant_mf,
+ const int *coef_weight, const uint8_t *zigzag,
+ int ctx_block_cat, int i_lambda2, int b_ac,
+ int b_chroma, int dc, int i_coefs, int idx, int b_8x8 )
+{
+ ALIGNED_16( dctcoef quant_coefs[2][16] );
+ ALIGNED_16( dctcoef coefs[16] ) = {0};
+ int delta_distortion[16];
+ int64_t score = 1ULL<<62;
+ int i, j;
+ const int f = 1<<15;
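+ /* nC selects the coeff_token VLC table: chroma DC uses a dedicated table,
+ * while other blocks predict the table index from the nonzero-coefficient
+ * counts of neighboring blocks. */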
+ int nC = ctx_block_cat == DCT_CHROMA_DC ? 4 : ct_index[x264_mb_predict_non_zero_code( h, ctx_block_cat == DCT_LUMA_DC ? (idx - LUMA_DC)*16 : idx )];
+
+ /* Code for handling 8x8dct -> 4x4dct CAVLC munging. Input/output use a different
+ * step/start/end than internal processing. */
+ int step = 1;
+ int start = b_ac;
+ int end = i_coefs - 1;
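+ /* CAVLC codes an 8x8 transform block as four interleaved 4x4 blocks:
+ * sub-block n takes every 4th coefficient of the 8x8 zigzag, starting at
+ * position n. */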
+ if( b_8x8 )
+ {
+ start = idx&3;
+ end = 60 + start;
+ step = 4;
+ }
+ idx &= 15;
+
+ i_lambda2 <<= LAMBDA_BITS;
+
+ /* Find last non-zero coefficient. */
+ for( i = end; i >= start; i -= step )
+ if( (unsigned)(dct[zigzag[i]] * (dc?quant_mf[0]>>1:quant_mf[zigzag[i]]) + f-1) >= 2*f )
+ break;
+
+ if( i < start )
+ goto zeroblock;
+
+ /* Prepare for QNS search: calculate distortion caused by each DCT coefficient
+ * rounding to be searched.
+ *
+ * We only search two roundings (nearest and nearest-1) like in CABAC trellis,
+ * so we just store the difference in distortion between them. */
+ int i_last_nnz = b_8x8 ? i >> 2 : i;
+ int coef_mask = 0;
+ int round_mask = 0;
+ for( i = b_ac, j = start; i <= i_last_nnz; i++, j += step )
+ {
+ int coef = dct[zigzag[j]];
+ int abs_coef = abs(coef);
+ int sign = coef < 0 ? -1 : 1;
+ int nearest_quant = ( f + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
+ quant_coefs[1][i] = quant_coefs[0][i] = sign * nearest_quant;
+ coefs[i] = quant_coefs[1][i];
+ if( nearest_quant )
+ {
+ /* We initialize the trellis with a deadzone halfway between nearest rounding
+ * and always-round-down. This gives much better results than initializing to either
+ * extreme.
+ * FIXME: should we initialize to the deadzones used by deadzone quant? */
+ int deadzone_quant = ( f/2 + abs_coef * (dc?quant_mf[0]>>1:quant_mf[zigzag[j]]) ) >> 16;
+ int unquant1 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-0) + 128) >> 8);
+ int unquant0 = (((dc?unquant_mf[0]<<1:unquant_mf[zigzag[j]]) * (nearest_quant-1) + 128) >> 8);
+ int d1 = abs_coef - unquant1;
+ int d0 = abs_coef - unquant0;
+ delta_distortion[i] = (d0*d0 - d1*d1) * (dc?256:coef_weight[j]);
+
+ /* Psy trellis: bias in favor of higher AC coefficients in the reconstructed frame. */
+ if( h->mb.i_psy_trellis && j && !dc && !b_chroma )
+ {
+ int orig_coef = b_8x8 ? h->mb.pic.fenc_dct8[idx>>2][zigzag[j]] : h->mb.pic.fenc_dct4[idx][zigzag[j]];
+ int predicted_coef = orig_coef - coef;
+ int psy_weight = b_8x8 ? x264_dct8_weight_tab[zigzag[j]] : x264_dct4_weight_tab[zigzag[j]];
+ int psy_value0 = h->mb.i_psy_trellis * abs(predicted_coef + unquant0 * sign);
+ int psy_value1 = h->mb.i_psy_trellis * abs(predicted_coef + unquant1 * sign);
+ delta_distortion[i] += (psy_value0 - psy_value1) * psy_weight;
+ }
+
+ quant_coefs[0][i] = sign * (nearest_quant-1);
+ if( deadzone_quant != nearest_quant )
+ coefs[i] = quant_coefs[0][i];
+ else
+ round_mask |= 1 << i;
+ }
+ else
+ delta_distortion[i] = 0;
+ coef_mask |= (!!coefs[i]) << i;
+ }
+
+ /* Calculate the cost of the starting state. */
+ h->out.bs.i_bits_encoded = 0;
+ if( !coef_mask )
+ bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
+ else
+ block_residual_write_cavlc_internal( h, ctx_block_cat, coefs + b_ac, nC );
+ score = (int64_t)h->out.bs.i_bits_encoded * i_lambda2;
+
+ /* QNS loop: pick the change that improves RD the most, apply it, repeat.
+ * coef_mask and round_mask are used to simplify tracking of nonzeroness
+ * and rounding modes chosen. */
+ while( 1 )
+ {
+ int64_t iter_score = score;
+ int iter_distortion_delta = 0;
+ int iter_coef = -1;
+ int iter_mask = coef_mask;
+ int iter_round = round_mask;
+ for( i = b_ac; i <= i_last_nnz; i++ )
+ {
+ if( !delta_distortion[i] )
+ continue;
+
+ /* Set up all the variables for this iteration. */
+ int cur_round = round_mask ^ (1 << i);
+ int round_change = (cur_round >> i)&1;
+ int old_coef = coefs[i];
+ int new_coef = quant_coefs[round_change][i];
+ int cur_mask = (coef_mask&~(1 << i))|(!!new_coef << i);
+ int cur_distortion_delta = delta_distortion[i] * (round_change ? -1 : 1);
+ int64_t cur_score = cur_distortion_delta;
+ coefs[i] = new_coef;
+
+ /* Count up bits. */
+ h->out.bs.i_bits_encoded = 0;
+ if( !cur_mask )
+ bs_write_vlc( &h->out.bs, x264_coeff0_token[nC] );
+ else
+ block_residual_write_cavlc_internal( h, ctx_block_cat, coefs + b_ac, nC );
+ cur_score += (int64_t)h->out.bs.i_bits_encoded * i_lambda2;
+
+ coefs[i] = old_coef;
+ if( cur_score < iter_score )
+ {
+ iter_score = cur_score;
+ iter_coef = i;
+ iter_mask = cur_mask;
+ iter_round = cur_round;
+ iter_distortion_delta = cur_distortion_delta;
+ }
+ }
+ if( iter_coef >= 0 )
+ {
+ score = iter_score - iter_distortion_delta;
+ coef_mask = iter_mask;
+ round_mask = iter_round;
+ coefs[iter_coef] = quant_coefs[((round_mask >> iter_coef)&1)][iter_coef];
+ /* Don't try adjusting coefficients we've already adjusted.
+ * Testing suggests this doesn't hurt results -- and sometimes actually helps. */
+ delta_distortion[iter_coef] = 0;
+ }
+ else
+ break;
+ }
+
+ if( coef_mask )
+ {
+ for( i = b_ac, j = start; i <= i_last_nnz; i++, j += step )
+ dct[zigzag[j]] = coefs[i];
+ for( ; j <= end; j += step )
+ dct[zigzag[j]] = 0;
+ return 1;
+ }
+
+zeroblock:
+ if( !dc )
+ {
+ if( b_8x8 )
+ for( i = start; i <= end; i+=step )
+ dct[zigzag[i]] = 0;
+ else
+ memset( dct, 0, 16*sizeof(dctcoef) );
+ }
+ return 0;
+}
+
static const uint8_t x264_zigzag_scan2[4] = {0,1,2,3};
-int x264_quant_dc_trellis( x264_t *h, int16_t *dct, int i_quant_cat,
- int i_qp, int i_ctxBlockCat, int b_intra, int b_chroma )
+int x264_quant_dc_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
+ int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
- return quant_trellis_cabac( h, dct,
+ if( h->param.b_cabac )
+ return quant_trellis_cabac( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
+ NULL, ctx_block_cat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 1, ctx_block_cat==DCT_CHROMA_DC ? 4 : 16, idx );
+
+ if( ctx_block_cat != DCT_CHROMA_DC )
+ ctx_block_cat = DCT_LUMA_DC;
+
+ return quant_trellis_cavlc( h, dct,
h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- NULL, i_ctxBlockCat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[h->mb.b_interlaced],
- i_ctxBlockCat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, 1, i_ctxBlockCat==DCT_CHROMA_DC ? 4 : 16, 0 );
+ NULL, ctx_block_cat==DCT_CHROMA_DC ? x264_zigzag_scan2 : x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 1, ctx_block_cat==DCT_CHROMA_DC ? 4 : 16, idx, 0 );
}
-int x264_quant_4x4_trellis( x264_t *h, int16_t *dct, int i_quant_cat,
- int i_qp, int i_ctxBlockCat, int b_intra, int b_chroma, int idx )
+int x264_quant_4x4_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
+ int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
- int b_ac = (i_ctxBlockCat == DCT_LUMA_AC || i_ctxBlockCat == DCT_CHROMA_AC);
- return quant_trellis_cabac( h, dct,
- h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
- x264_dct4_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan4[h->mb.b_interlaced],
- i_ctxBlockCat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, 0, 16, idx );
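+ /* b_ac is 1 for the AC block categories (luma AC, chroma AC, and the 4:4:4
+ * U/V AC categories), which code coefficients 1..15 only. */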
+ static const uint8_t ctx_ac[14] = {0,1,0,0,1,0,0,1,0,0,0,1,0,0};
+ int b_ac = ctx_ac[ctx_block_cat];
+ if( h->param.b_cabac )
+ return quant_trellis_cabac( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
+ x264_dct4_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx );
+
+ return quant_trellis_cavlc( h, dct,
+ h->quant4_mf[i_quant_cat][i_qp], h->unquant4_mf[i_quant_cat][i_qp],
+ x264_dct4_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan4[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], b_ac, b_chroma, 0, 16, idx, 0 );
}
-int x264_quant_8x8_trellis( x264_t *h, int16_t *dct, int i_quant_cat,
- int i_qp, int b_intra, int idx )
+int x264_quant_8x8_trellis( x264_t *h, dctcoef *dct, int i_quant_cat,
+ int i_qp, int ctx_block_cat, int b_intra, int b_chroma, int idx )
{
- return quant_trellis_cabac( h, dct,
- h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
- x264_dct8_weight2_zigzag[h->mb.b_interlaced],
- x264_zigzag_scan8[h->mb.b_interlaced],
- DCT_LUMA_8x8, h->mb.i_trellis_lambda2[0][b_intra], 0, 0, 64, idx );
-}
+ if( h->param.b_cabac )
+ {
+ return quant_trellis_cabac( h, dct,
+ h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
+ x264_dct8_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan8[MB_INTERLACED],
+ ctx_block_cat, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 64, idx );
+ }
+ /* 8x8 CAVLC is split into 4 4x4 blocks */
+ int nzaccum = 0;
+ for( int i = 0; i < 4; i++ )
+ {
+ int nz = quant_trellis_cavlc( h, dct,
+ h->quant8_mf[i_quant_cat][i_qp], h->unquant8_mf[i_quant_cat][i_qp],
+ x264_dct8_weight2_zigzag[MB_INTERLACED],
+ x264_zigzag_scan8[MB_INTERLACED],
+ DCT_LUMA_4x4, h->mb.i_trellis_lambda2[b_chroma][b_intra], 0, b_chroma, 0, 16, idx*4+i, 1 );
+ /* Set up nonzero count for future calls */
+ h->mb.cache.non_zero_count[x264_scan8[idx*4+i]] = nz;
+ nzaccum |= nz;
+ }
+ return nzaccum;
+}