X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=encoder%2Fmacroblock.c;h=f8eb45fdff5a40da62a3a12de46e7b6b7aa774da;hb=dd354db4db2f26e63ed36eb790052c6794e5a684;hp=6faa305e7c52cf3c6f510f473d1adce3b448106b;hpb=e394bd600ba9b1a3cee24e7d0b01dfb0acc5d1ad;p=x264 diff --git a/encoder/macroblock.c b/encoder/macroblock.c index 6faa305e..f8eb45fd 100644 --- a/encoder/macroblock.c +++ b/encoder/macroblock.c @@ -1,11 +1,12 @@ /***************************************************************************** - * macroblock.c: h264 encoder library + * macroblock.c: macroblock encoding ***************************************************************************** - * Copyright (C) 2003-2008 x264 project + * Copyright (C) 2003-2011 x264 project * * Authors: Laurent Aimar * Loren Merritt * Fiona Glaser + * Henrik Gramner * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -20,6 +21,9 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. + * + * This program is also available under a commercial proprietary license. + * For more information, contact us at licensing@x264.com. *****************************************************************************/ #include "common/common.h" @@ -27,8 +31,8 @@ /* These chroma DC functions don't have assembly versions and are only used here. */ -#define ZIG(i,y,x) level[i] = dct[x][y]; -static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] ) +#define ZIG(i,y,x) level[i] = dct[x*2+y]; +static inline void zigzag_scan_2x2_dc( dctcoef level[4], dctcoef dct[4] ) { ZIG(0,0,0) ZIG(1,0,1) @@ -37,69 +41,75 @@ static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] ) } #undef ZIG -#define IDCT_DEQUANT_START \ - int d0 = dct[0][0] + dct[0][1]; \ - int d1 = dct[1][0] + dct[1][1]; \ - int d2 = dct[0][0] - dct[0][1]; \ - int d3 = dct[1][0] - dct[1][1]; \ - int dmf = dequant_mf[i_qp%6][0][0]; \ - int qbits = i_qp/6 - 5; \ - if( qbits > 0 ) \ - { \ - dmf <<= qbits; \ - qbits = 0; \ - } - -static inline void idct_dequant_2x2_dc( int16_t dct[2][2], int16_t dct4x4[4][4][4], int dequant_mf[6][4][4], int i_qp ) +static inline void zigzag_scan_2x4_dc( dctcoef level[8], dctcoef dct[8] ) { - IDCT_DEQUANT_START - dct4x4[0][0][0] = (d0 + d1) * dmf >> -qbits; - dct4x4[1][0][0] = (d0 - d1) * dmf >> -qbits; - dct4x4[2][0][0] = (d2 + d3) * dmf >> -qbits; - dct4x4[3][0][0] = (d2 - d3) * dmf >> -qbits; + level[0] = dct[0]; + level[1] = dct[2]; + level[2] = dct[1]; + level[3] = dct[4]; + level[4] = dct[6]; + level[5] = dct[3]; + level[6] = dct[5]; + level[7] = dct[7]; } -static inline void idct_dequant_2x2_dconly( int16_t dct[2][2], int dequant_mf[6][4][4], int i_qp ) +#define IDCT_DEQUANT_2X2_START \ + int d0 = dct[0] + dct[1]; \ + int d1 = dct[2] + dct[3]; \ + int d2 = dct[0] - dct[1]; \ + int d3 = dct[2] - dct[3]; \ + int dmf = dequant_mf[i_qp%6][0] << i_qp/6; + +static inline void idct_dequant_2x2_dc( dctcoef dct[4], dctcoef dct4x4[4][16], int dequant_mf[6][16], int i_qp ) { - IDCT_DEQUANT_START - dct[0][0] = (d0 + d1) * dmf >> -qbits; - dct[0][1] = (d0 - d1) * dmf >> -qbits; - dct[1][0] = (d2 + d3) * dmf >> -qbits; - dct[1][1] = (d2 - d3) * dmf >> -qbits; + IDCT_DEQUANT_2X2_START + dct4x4[0][0] = (d0 + d1) * dmf >> 5; + dct4x4[1][0] = (d0 - d1) * dmf >> 5; + dct4x4[2][0] = (d2 + d3) * dmf >> 5; + dct4x4[3][0] = (d2 - 
d3) * dmf >> 5;
 }
 
-static inline void dct2x2dc( int16_t d[2][2], int16_t dct4x4[4][4][4] )
+static inline void idct_dequant_2x2_dconly( dctcoef dct[4], int dequant_mf[6][16], int i_qp )
 {
-    int d0 = dct4x4[0][0][0] + dct4x4[1][0][0];
-    int d1 = dct4x4[2][0][0] + dct4x4[3][0][0];
-    int d2 = dct4x4[0][0][0] - dct4x4[1][0][0];
-    int d3 = dct4x4[2][0][0] - dct4x4[3][0][0];
-    d[0][0] = d0 + d1;
-    d[1][0] = d2 + d3;
-    d[0][1] = d0 - d1;
-    d[1][1] = d2 - d3;
-    dct4x4[0][0][0] = 0;
-    dct4x4[1][0][0] = 0;
-    dct4x4[2][0][0] = 0;
-    dct4x4[3][0][0] = 0;
+    IDCT_DEQUANT_2X2_START
+    dct[0] = (d0 + d1) * dmf >> 5;
+    dct[1] = (d0 - d1) * dmf >> 5;
+    dct[2] = (d2 + d3) * dmf >> 5;
+    dct[3] = (d2 - d3) * dmf >> 5;
 }
+#undef IDCT_DEQUANT_2X2_START
 
-static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[4][4], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
+static inline void dct2x2dc( dctcoef d[4], dctcoef dct4x4[4][16] )
 {
-    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
-    if( h->mb.b_trellis )
-        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, idx );
-    else
-        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
+    int d0 = dct4x4[0][0] + dct4x4[1][0];
+    int d1 = dct4x4[2][0] + dct4x4[3][0];
+    int d2 = dct4x4[0][0] - dct4x4[1][0];
+    int d3 = dct4x4[2][0] - dct4x4[3][0];
+    d[0] = d0 + d1;
+    d[2] = d2 + d3;
+    d[1] = d0 - d1;
+    d[3] = d2 - d3;
+    dct4x4[0][0] = 0;
+    dct4x4[1][0] = 0;
+    dct4x4[2][0] = 0;
+    dct4x4[3][0] = 0;
 }
 
-static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp, int b_intra, int idx )
+static ALWAYS_INLINE int array_non_zero( dctcoef *v, int i_count )
 {
-    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
-    if( h->mb.b_trellis )
-        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
+    if( WORD_SIZE == 8 )
+    {
+        for( int i = 0; i < i_count; i += 8/sizeof(dctcoef) )
+            if( M64( &v[i] ) )
+                return 1;
+    }
     else
-        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
+    {
+        for( int i = 0; i < i_count; i += 4/sizeof(dctcoef) )
+            if( M32( &v[i] ) )
+                return 1;
+    }
+    return 0;
 }
 
 /* All encoding functions must output the correct CBP and NNZ values.
@@ -113,315 +123,401 @@ static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp,
 /* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. 
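 * For example, a sketch of the mechanism (using the same macros this patch
 * introduces further down; the pairing here is illustrative, not a quote):
 *
 *     STORE_8x8_NNZ( p, i8, 0 );        // zero the four 4x4 NNZ entries
 *     h->mb.i_cbp_luma &= ~(1 << i8);   // clear that 8x8 block's CBP bit
 *
 * The stale coefficients left behind in h->dct are simply never read,
 * since the entropy coder consults CBP and NNZ first.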
*/ -void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp ) +static void x264_mb_encode_i16x16( x264_t *h, int p, int i_qp ) { - int nz; - uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]]; - uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]]; - DECLARE_ALIGNED_16( int16_t dct4x4[4][4] ); + pixel *p_src = h->mb.pic.p_fenc[p]; + pixel *p_dst = h->mb.pic.p_fdec[p]; - if( h->mb.b_lossless ) - { - h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst ); - nz = array_non_zero( h->dct.luma4x4[idx] ); - h->mb.cache.non_zero_count[x264_scan8[idx]] = nz; - h->mb.i_cbp_luma |= nz<<(idx>>2); - return; - } + ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] ); + ALIGNED_ARRAY_16( dctcoef, dct_dc4x4,[16] ); - h->dctf.sub4x4_dct( dct4x4, p_src, p_dst ); - - nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx ); - h->mb.cache.non_zero_count[x264_scan8[idx]] = nz; - if( nz ) - { - h->mb.i_cbp_luma |= 1<<(idx>>2); - h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 ); - h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp ); - h->dctf.add4x4_idct( p_dst, dct4x4 ); - } -} - -#define STORE_8x8_NNZ(idx,nz)\ -{\ - *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] = nz * 0x0101;\ - *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] = nz * 0x0101;\ -} - -void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp ) -{ - int x = 8 * (idx&1); - int y = 8 * (idx>>1); - int nz; - uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE]; - uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE]; - DECLARE_ALIGNED_16( int16_t dct8x8[8][8] ); + int nz, block_cbp = 0; + int decimate_score = h->mb.b_dct_decimate ? 0 : 9; + int i_quant_cat = p ? CQM_4IC : CQM_4IY; + int i_mode = h->mb.i_intra16x16_pred_mode; if( h->mb.b_lossless ) - { - h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst ); - nz = array_non_zero( h->dct.luma8x8[idx] ); - STORE_8x8_NNZ(idx,nz); - h->mb.i_cbp_luma |= nz<dctf.sub8x8_dct8( dct8x8, p_src, p_dst ); - - nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx ); - h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 ); - if( nz ) - { - h->mb.i_cbp_luma |= 1<quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp ); - h->dctf.add8x8_idct8( p_dst, dct8x8 ); - STORE_8x8_NNZ(idx,1); - } + x264_predict_lossless_16x16( h, p, i_mode ); else - STORE_8x8_NNZ(idx,0); -} - -static void x264_mb_encode_i16x16( x264_t *h, int i_qp ) -{ - uint8_t *p_src = h->mb.pic.p_fenc[0]; - uint8_t *p_dst = h->mb.pic.p_fdec[0]; - - DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] ); - DECLARE_ALIGNED_16( int16_t dct_dc4x4[4][4] ); - - int i, nz; + h->predict_16x16[i_mode]( h->mb.pic.p_fdec[p] ); if( h->mb.b_lossless ) { - for( i = 0; i < 16; i++ ) + for( int i = 0; i < 16; i++ ) { int oe = block_idx_xy_fenc[i]; int od = block_idx_xy_fdec[i]; - h->zigzagf.sub_4x4( h->dct.luma4x4[i], p_src+oe, p_dst+od ); - dct_dc4x4[0][block_idx_yx_1d[i]] = h->dct.luma4x4[i][0]; - h->dct.luma4x4[i][0] = 0; - nz = array_non_zero( h->dct.luma4x4[i] ); - h->mb.cache.non_zero_count[x264_scan8[i]] = nz; - h->mb.i_cbp_luma |= nz; + nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16*p+i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] ); + h->mb.cache.non_zero_count[x264_scan8[16*p+i]] = nz; + block_cbp |= nz; } - h->mb.i_cbp_luma *= 0xf; - h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 ); - h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 ); + h->mb.i_cbp_luma |= block_cbp * 0xf; + h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = array_non_zero( dct_dc4x4, 16 ); + h->zigzagf.scan_4x4( 
h->dct.luma16x16_dc[p], dct_dc4x4 ); return; } h->dctf.sub16x16_dct( dct4x4, p_src, p_dst ); - for( i = 0; i < 16; i++ ) + for( int i = 0; i < 16; i++ ) { /* copy dc coeff */ - dct_dc4x4[0][block_idx_xy_1d[i]] = dct4x4[i][0][0]; - dct4x4[i][0][0] = 0; + if( h->mb.b_noise_reduction ) + h->quantf.denoise_dct( dct4x4[i], h->nr_residual_sum[0], h->nr_offset[0], 16 ); + dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0]; + dct4x4[i][0] = 0; /* quant/scan/dequant */ - nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i ); - h->mb.cache.non_zero_count[x264_scan8[i]] = nz; + if( h->mb.b_trellis ) + nz = x264_quant_4x4_trellis( h, dct4x4[i], i_quant_cat, i_qp, ctx_cat_plane[DCT_LUMA_AC][p], 1, !!p, i ); + else + nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] ); + h->mb.cache.non_zero_count[x264_scan8[16*p+i]] = nz; if( nz ) { - h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] ); - h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp ); - h->mb.i_cbp_luma = 0xf; + h->zigzagf.scan_4x4( h->dct.luma4x4[16*p+i], dct4x4[i] ); + h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[i_quant_cat], i_qp ); + if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16*p+i] ); + block_cbp = 0xf; } } + /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */ + /* More useful with CAVLC, but still useful with CABAC. */ + if( decimate_score < 6 ) + { + CLEAR_16x16_NNZ( p ); + block_cbp = 0; + } + else + h->mb.i_cbp_luma |= block_cbp; + h->dctf.dct4x4dc( dct_dc4x4 ); if( h->mb.b_trellis ) - nz = x264_quant_dc_trellis( h, (int16_t*)dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1); + nz = x264_quant_luma_dc_trellis( h, dct_dc4x4, i_quant_cat, i_qp, ctx_cat_plane[DCT_LUMA_DC][p], 1, LUMA_DC+p ); else - nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 ); + nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[i_quant_cat][i_qp][0]>>1, h->quant4_bias[i_quant_cat][i_qp][0]<<1 ); - h->mb.cache.non_zero_count[x264_scan8[24]] = nz; + h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = nz; if( nz ) { - h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 ); + h->zigzagf.scan_4x4( h->dct.luma16x16_dc[p], dct_dc4x4 ); /* output samples to fdec */ h->dctf.idct4x4dc( dct_dc4x4 ); - h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp ); /* XXX not inversed */ - if( h->mb.i_cbp_luma ) - for( i = 0; i < 16; i++ ) - dct4x4[i][0][0] = dct_dc4x4[0][block_idx_xy_1d[i]]; + h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[i_quant_cat], i_qp ); /* XXX not inversed */ + if( block_cbp ) + for( int i = 0; i < 16; i++ ) + dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]]; } /* put pixels to fdec */ - if( h->mb.i_cbp_luma ) + if( block_cbp ) h->dctf.add16x16_idct( p_dst, dct4x4 ); else if( nz ) h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 ); } -void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp ) +/* Round down coefficients losslessly in DC-only chroma blocks. + * Unlike luma blocks, this can't be done with a lookup table or + * other shortcut technique because of the interdependencies + * between the coefficients due to the chroma DC transform. 
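 * As a sketch of that coupling: with c0..c3 the quantized DC levels of the
 * four 4x4 chroma blocks, the 2x2 DC transform (see dct2x2dc above) yields
 *
 *     out[0] = c0+c1+c2+c3;    out[1] = c0+c1-c2-c3;
 *     out[2] = c0-c1+c2-c3;    out[3] = c0-c1-c2+c3;
 *
 * so rounding any single level changes all four reconstructed DCs at once,
 * and candidates must be evaluated jointly (which is what
 * optimize_chroma_2x2_dc / optimize_chroma_2x4_dc are for).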
*/ +static ALWAYS_INLINE int x264_mb_optimize_chroma_dc( x264_t *h, dctcoef *dct_dc, int dequant_mf[6][16], int i_qp, int chroma422 ) { - int i, ch, nz, nz_dc; - int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate); + int dmf = dequant_mf[i_qp%6][0] << i_qp/6; + + /* If the QP is too high, there's no benefit to rounding optimization. */ + if( dmf > 32*64 ) + return 1; + + if( chroma422 ) + return h->quantf.optimize_chroma_2x4_dc( dct_dc, dmf ); + else + return h->quantf.optimize_chroma_2x2_dc( dct_dc, dmf ); +} + +static ALWAYS_INLINE void x264_mb_encode_chroma_internal( x264_t *h, int b_inter, int i_qp, int chroma422 ) +{ + int nz, nz_dc; + int b_decimate = b_inter && h->mb.b_dct_decimate; + int (*dequant_mf)[16] = h->dequant4_mf[CQM_4IC + b_inter]; + ALIGNED_ARRAY_16( dctcoef, dct_dc,[8] ); h->mb.i_cbp_chroma = 0; + h->nr_count[2] += h->mb.b_noise_reduction * 4; + + /* Early termination: check variance of chroma residual before encoding. + * Don't bother trying early termination at low QPs. + * Values are experimentally derived. */ + if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) && !h->mb.b_noise_reduction ) + { + int thresh = chroma422 ? (x264_lambda2_tab[i_qp] + 16) >> 5 : (x264_lambda2_tab[i_qp] + 32) >> 6; + int ssd[2]; + int chromapix = chroma422 ? PIXEL_8x16 : PIXEL_8x8; + + int score = h->pixf.var2[chromapix]( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] ); + if( score < thresh*4 ) + score += h->pixf.var2[chromapix]( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] ); + if( score < thresh*4 ) + { + M16( &h->mb.cache.non_zero_count[x264_scan8[16]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[18]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[32]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[34]] ) = 0; + if( chroma422 ) + { + M16( &h->mb.cache.non_zero_count[x264_scan8[24]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[26]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[40]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[42]] ) = 0; + } + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] = 0; + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] = 0; + + for( int ch = 0; ch < 2; ch++ ) + { + if( ssd[ch] > thresh ) + { + pixel *p_src = h->mb.pic.p_fenc[1+ch]; + pixel *p_dst = h->mb.pic.p_fdec[1+ch]; + + if( chroma422 ) + /* Cannot be replaced by two calls to sub8x8_dct_dc since the hadamard transform is different */ + h->dctf.sub8x16_dct_dc( dct_dc, p_src, p_dst ); + else + h->dctf.sub8x8_dct_dc( dct_dc, p_src, p_dst ); + + if( h->mb.b_trellis ) + nz_dc = x264_quant_chroma_dc_trellis( h, dct_dc, i_qp+3*chroma422, !b_inter, CHROMA_DC+ch ); + else + { + nz_dc = 0; + for( int i = 0; i <= chroma422; i++ ) + nz_dc |= h->quantf.quant_2x2_dc( &dct_dc[4*i], h->quant4_mf[CQM_4IC+b_inter][i_qp+3*chroma422][0] >> 1, + h->quant4_bias[CQM_4IC+b_inter][i_qp+3*chroma422][0] << 1 ); + } - for( ch = 0; ch < 2; ch++ ) + if( nz_dc ) + { + if( !x264_mb_optimize_chroma_dc( h, dct_dc, dequant_mf, i_qp+3*chroma422, chroma422 ) ) + continue; + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = 1; + if( chroma422 ) + { + zigzag_scan_2x4_dc( h->dct.chroma_dc[ch], dct_dc ); + h->quantf.idct_dequant_2x4_dconly( dct_dc, dequant_mf, i_qp+3 ); + } + else + { + zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct_dc ); + idct_dequant_2x2_dconly( dct_dc, dequant_mf, i_qp ); + } + + for( int i = 0; i <= chroma422; i++ ) + h->dctf.add8x8_idct_dc( p_dst + 8*i*FDEC_STRIDE, 
&dct_dc[4*i] ); + h->mb.i_cbp_chroma = 1; + } + } + } + return; + } + } + + for( int ch = 0; ch < 2; ch++ ) { - uint8_t *p_src = h->mb.pic.p_fenc[1+ch]; - uint8_t *p_dst = h->mb.pic.p_fdec[1+ch]; + pixel *p_src = h->mb.pic.p_fenc[1+ch]; + pixel *p_dst = h->mb.pic.p_fdec[1+ch]; int i_decimate_score = 0; int nz_ac = 0; - DECLARE_ALIGNED_16( int16_t dct2x2[2][2] ); - DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] ); + ALIGNED_ARRAY_16( dctcoef, dct4x4,[8],[16] ); if( h->mb.b_lossless ) { - for( i = 0; i < 4; i++ ) + static const uint8_t chroma422_scan[8] = { 0, 2, 1, 5, 3, 6, 4, 7 }; + + for( int i = 0; i < (chroma422?8:4); i++ ) { - int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE; - int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE; - h->zigzagf.sub_4x4( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od ); - h->dct.chroma_dc[ch][i] = h->dct.luma4x4[16+i+ch*4][0]; - h->dct.luma4x4[16+i+ch*4][0] = 0; - nz = array_non_zero( h->dct.luma4x4[16+i+ch*4] ); - h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz; + int oe = 4*(i&1) + 4*(i>>1)*FENC_STRIDE; + int od = 4*(i&1) + 4*(i>>1)*FDEC_STRIDE; + nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+(chroma422?i&4:0)+ch*16], p_src+oe, p_dst+od, + &h->dct.chroma_dc[ch][chroma422?chroma422_scan[i]:i] ); + h->mb.cache.non_zero_count[x264_scan8[16+i+(chroma422?i&4:0)+ch*16]] = nz; h->mb.i_cbp_chroma |= nz; } - h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] ); + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = array_non_zero( h->dct.chroma_dc[ch], chroma422?8:4 ); continue; } - h->dctf.sub8x8_dct( dct4x4, p_src, p_dst ); - dct2x2dc( dct2x2, dct4x4 ); + for( int i = 0; i <= chroma422; i++ ) + h->dctf.sub8x8_dct( &dct4x4[4*i], p_src + 8*i*FENC_STRIDE, p_dst + 8*i*FDEC_STRIDE ); + + if( h->mb.b_noise_reduction ) + for( int i = 0; i < (chroma422?8:4); i++ ) + h->quantf.denoise_dct( dct4x4[i], h->nr_residual_sum[2], h->nr_offset[2], 16 ); + + if( chroma422 ) + h->dctf.dct2x4dc( dct_dc, dct4x4 ); + else + dct2x2dc( dct_dc, dct4x4 ); + /* calculate dct coeffs */ - for( i = 0; i < 4; i++ ) + for( int i = 0; i < (chroma422?8:4); i++ ) { if( h->mb.b_trellis ) - nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 0 ); + nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 ); else nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] ); - h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz; + h->mb.cache.non_zero_count[x264_scan8[16+i+(chroma422?i&4:0)+ch*16]] = nz; if( nz ) { nz_ac = 1; - h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] ); - h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp ); + h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+(chroma422?i&4:0)+ch*16], dct4x4[i] ); + h->quantf.dequant_4x4( dct4x4[i], dequant_mf, i_qp ); if( b_decimate ) - i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] ); + i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+(chroma422?i&4:0)+ch*16] ); } } if( h->mb.b_trellis ) - nz_dc = x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter ); + nz_dc = x264_quant_chroma_dc_trellis( h, dct_dc, i_qp+3*chroma422, !b_inter, CHROMA_DC+ch ); else - nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 ); + { + nz_dc = 0; + for( int i = 0; i <= chroma422; i++ ) + nz_dc |= 
h->quantf.quant_2x2_dc( &dct_dc[4*i], h->quant4_mf[CQM_4IC+b_inter][i_qp+3*chroma422][0] >> 1, + h->quant4_bias[CQM_4IC+b_inter][i_qp+3*chroma422][0] << 1 ); + } - h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc; + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = nz_dc; if( (b_decimate && i_decimate_score < 7) || !nz_ac ) { /* Decimate the block */ - h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0; - h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0; - h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0; - h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[16+16*ch]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[18+16*ch]] ) = 0; + if( chroma422 ) + { + M16( &h->mb.cache.non_zero_count[x264_scan8[24+16*ch]] ) = 0; + M16( &h->mb.cache.non_zero_count[x264_scan8[26+16*ch]] ) = 0; + } + if( !nz_dc ) /* Whole block is empty */ continue; + if( !x264_mb_optimize_chroma_dc( h, dct_dc, dequant_mf, i_qp+3*chroma422, chroma422 ) ) + { + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+ch]] = 0; + continue; + } /* DC-only */ - zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 ); - idct_dequant_2x2_dconly( dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ); - h->dctf.add8x8_idct_dc( p_dst, dct2x2 ); + if( chroma422 ) + { + zigzag_scan_2x4_dc( h->dct.chroma_dc[ch], dct_dc ); + h->quantf.idct_dequant_2x4_dconly( dct_dc, dequant_mf, i_qp+3 ); + } + else + { + zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct_dc ); + idct_dequant_2x2_dconly( dct_dc, dequant_mf, i_qp ); + } + + for( int i = 0; i <= chroma422; i++ ) + h->dctf.add8x8_idct_dc( p_dst + 8*i*FDEC_STRIDE, &dct_dc[4*i] ); } else { h->mb.i_cbp_chroma = 1; + if( nz_dc ) { - zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 ); - idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp ); + if( chroma422 ) + { + zigzag_scan_2x4_dc( h->dct.chroma_dc[ch], dct_dc ); + h->quantf.idct_dequant_2x4_dc( dct_dc, dct4x4, dequant_mf, i_qp+3 ); + } + else + { + zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct_dc ); + idct_dequant_2x2_dc( dct_dc, dct4x4, dequant_mf, i_qp ); + } } - h->dctf.add8x8_idct( p_dst, dct4x4 ); + + for( int i = 0; i <= chroma422; i++ ) + h->dctf.add8x8_idct( p_dst + 8*i*FDEC_STRIDE, &dct4x4[4*i] ); } } - if( h->mb.i_cbp_chroma ) - h->mb.i_cbp_chroma = 2; /* dc+ac (we can't do only ac) */ - else if( h->mb.cache.non_zero_count[x264_scan8[25]] | - h->mb.cache.non_zero_count[x264_scan8[26]] ) - h->mb.i_cbp_chroma = 1; /* dc only */ + /* 0 = none, 1 = DC only, 2 = DC+AC */ + h->mb.i_cbp_chroma += (h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] | + h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] | h->mb.i_cbp_chroma); } -static void x264_macroblock_encode_skip( x264_t *h ) +void x264_mb_encode_chroma( x264_t *h, int b_inter, int i_qp ) { - h->mb.i_cbp_luma = 0x00; - h->mb.i_cbp_chroma = 0x00; - memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE ); - /* store cbp */ - h->mb.cbp[h->mb.i_mb_xy] = 0; + if( CHROMA_FORMAT == CHROMA_420 ) + x264_mb_encode_chroma_internal( h, b_inter, i_qp, 0 ); + else + x264_mb_encode_chroma_internal( h, b_inter, i_qp, 1 ); } -/***************************************************************************** - * x264_macroblock_encode_pskip: - * Encode an already marked skip block - *****************************************************************************/ -static void x264_macroblock_encode_pskip( x264_t *h ) +static void x264_macroblock_encode_skip( x264_t *h ) { - const int mvx = x264_clip3( 
h->mb.cache.mv[0][x264_scan8[0]][0], - h->mb.mv_min[0], h->mb.mv_max[0] ); - const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1], - h->mb.mv_min[1], h->mb.mv_max[1] ); - - /* don't do pskip motion compensation if it was already done in macroblock_analyse */ - if( !h->mb.b_skip_mc ) + M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[16+ 0]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[16+ 2]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[32+ 0]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[32+ 2]] ) = 0; + if( CHROMA_FORMAT >= CHROMA_422 ) { - h->mc.mc_luma( h->mb.pic.p_fdec[0], FDEC_STRIDE, - h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0], - mvx, mvy, 16, 16 ); - - h->mc.mc_chroma( h->mb.pic.p_fdec[1], FDEC_STRIDE, - h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1], - mvx, mvy, 8, 8 ); - - h->mc.mc_chroma( h->mb.pic.p_fdec[2], FDEC_STRIDE, - h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2], - mvx, mvy, 8, 8 ); + M32( &h->mb.cache.non_zero_count[x264_scan8[16+ 8]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[16+10]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[32+ 8]] ) = 0; + M32( &h->mb.cache.non_zero_count[x264_scan8[32+10]] ) = 0; } - - x264_macroblock_encode_skip( h ); + h->mb.i_cbp_luma = 0; + h->mb.i_cbp_chroma = 0; + h->mb.cbp[h->mb.i_mb_xy] = 0; } /***************************************************************************** * Intra prediction for predictive lossless mode. *****************************************************************************/ -/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes - * that the edge pixels of the reconstructed frame are the same as that of the source frame. This means - * they will only work correctly if the neighboring blocks are losslessly coded. In practice, this means - * lossless mode cannot be mixed with lossy mode within a frame. */ -/* This can be resolved by explicitly copying the edge pixels after doing the mc.copy, but this doesn't - * need to be done unless we decide to allow mixing lossless and lossy compression. 
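 * (The rewritten x264_predict_lossless_chroma below does perform such an
 * edge fixup: after each mc.copy it re-copies the top row from the
 * reconstructed fdec with memcpy, and the left column with
 * x264_copy_column8.)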
*/ - -void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode ) +void x264_predict_lossless_chroma( x264_t *h, int i_mode ) { - int stride = h->fenc->i_stride[1] << h->mb.b_interlaced; + int height = 16 >> CHROMA_V_SHIFT; if( i_mode == I_PRED_CHROMA_V ) { - h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 ); - h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 ); + h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-FENC_STRIDE, FENC_STRIDE, height ); + h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-FENC_STRIDE, FENC_STRIDE, height ); + memcpy( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[1]-FDEC_STRIDE, 8*sizeof(pixel) ); + memcpy( h->mb.pic.p_fdec[2], h->mb.pic.p_fdec[2]-FDEC_STRIDE, 8*sizeof(pixel) ); } else if( i_mode == I_PRED_CHROMA_H ) { - h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 ); - h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 ); + h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-1, FENC_STRIDE, height ); + h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-1, FENC_STRIDE, height ); + x264_copy_column8( h->mb.pic.p_fdec[1]+4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+4*FDEC_STRIDE-1 ); + x264_copy_column8( h->mb.pic.p_fdec[2]+4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+4*FDEC_STRIDE-1 ); + if( CHROMA_FORMAT == CHROMA_422 ) + { + x264_copy_column8( h->mb.pic.p_fdec[1]+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+12*FDEC_STRIDE-1 ); + x264_copy_column8( h->mb.pic.p_fdec[2]+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+12*FDEC_STRIDE-1 ); + } } else { - h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] ); - h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] ); + h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] ); + h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] ); } } -void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode ) +void x264_predict_lossless_4x4( x264_t *h, pixel *p_dst, int p, int idx, int i_mode ) { - int stride = h->fenc->i_stride[0] << h->mb.b_interlaced; - uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride; + int stride = h->fenc->i_stride[p] << MB_INTERLACED; + pixel *p_src = h->mb.pic.p_fenc_plane[p] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride; if( i_mode == I_PRED_4x4_V ) h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 ); @@ -431,10 +527,10 @@ void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode ) h->predict_4x4[i_mode]( p_dst ); } -void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] ) +void x264_predict_lossless_8x8( x264_t *h, pixel *p_dst, int p, int idx, int i_mode, pixel edge[36] ) { - int stride = h->fenc->i_stride[0] << h->mb.b_interlaced; - uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride; + int stride = h->fenc->i_stride[p] << MB_INTERLACED; + pixel *p_src = h->mb.pic.p_fenc_plane[p] + (idx&1)*8 + (idx>>1)*8*stride; if( i_mode == I_PRED_8x8_V ) h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 ); @@ -444,37 +540,46 @@ void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, h->predict_8x8[i_mode]( p_dst, edge ); } -void x264_predict_lossless_16x16( x264_t *h, int i_mode ) +void x264_predict_lossless_16x16( x264_t *h, int p, int i_mode ) { - int stride = 
h->fenc->i_stride[0] << h->mb.b_interlaced; + int stride = h->fenc->i_stride[p] << MB_INTERLACED; if( i_mode == I_PRED_16x16_V ) - h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 ); + h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc_plane[p]-stride, stride, 16 ); else if( i_mode == I_PRED_16x16_H ) - h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 ); + h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc_plane[p]-1, stride, 16 ); else - h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] ); + h->predict_16x16[i_mode]( h->mb.pic.p_fdec[p] ); } /***************************************************************************** * x264_macroblock_encode: *****************************************************************************/ -void x264_macroblock_encode( x264_t *h ) +static ALWAYS_INLINE void x264_macroblock_encode_internal( x264_t *h, int plane_count, int chroma ) { - int i_cbp_dc = 0; int i_qp = h->mb.i_qp; - int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate; + int b_decimate = h->mb.b_dct_decimate; int b_force_no_skip = 0; - int i,idx,nz; + int nz; h->mb.i_cbp_luma = 0; - h->mb.cache.non_zero_count[x264_scan8[24]] = 0; + for( int p = 0; p < plane_count; p++ ) + h->mb.cache.non_zero_count[x264_scan8[LUMA_DC+p]] = 0; + + if( h->mb.i_type == I_PCM ) + { + /* if PCM is chosen, we need to store reconstructed frame data */ + for( int p = 0; p < plane_count; p++ ) + h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[p], FDEC_STRIDE, h->mb.pic.p_fenc[p], FENC_STRIDE, 16 ); + if( chroma ) + { + int height = 16 >> CHROMA_V_SHIFT; + h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, height ); + h->mc.copy[PIXEL_8x8] ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, height ); + } + return; + } - if( h->sh.b_mbaff - && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride - && IS_SKIP(h->mb.type[h->sh.i_first_mb]) ) + if( !h->mb.b_allow_skip ) { - /* The first skip is predicted to be a frame mb pair. - * We don't yet support the aff part of mbaff, so force it to non-skip - * so that we can pick the aff flag. */ b_force_no_skip = 1; if( IS_SKIP(h->mb.i_type) ) { @@ -487,8 +592,45 @@ void x264_macroblock_encode( x264_t *h ) if( h->mb.i_type == P_SKIP ) { - /* A bit special */ - x264_macroblock_encode_pskip( h ); + /* don't do pskip motion compensation if it was already done in macroblock_analyse */ + if( !h->mb.b_skip_mc ) + { + int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0], + h->mb.mv_min[0], h->mb.mv_max[0] ); + int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1], + h->mb.mv_min[1], h->mb.mv_max[1] ); + + for( int p = 0; p < plane_count; p++ ) + h->mc.mc_luma( h->mb.pic.p_fdec[p], FDEC_STRIDE, + &h->mb.pic.p_fref[0][0][p*4], h->mb.pic.i_stride[p], + mvx, mvy, 16, 16, &h->sh.weight[0][p] ); + + if( chroma ) + { + int v_shift = CHROMA_V_SHIFT; + int height = 16 >> v_shift; + + /* Special case for mv0, which is (of course) very common in P-skip mode. 
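 * With a zero vector the prediction is just the co-located reference
 * samples, so the general subpel filter can be skipped in favour of a
 * plain deinterleave of the packed reference chroma, as below:
 *
 *     if( mvx | mvy ) mc_chroma( ... );                      // subpel path
 *     else            load_deinterleave_chroma_fdec( ... );  // plain copy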
*/ + if( mvx | mvy ) + h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE, + h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1], + mvx, 2*mvy>>v_shift, 8, height ); + else + h->mc.load_deinterleave_chroma_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], + h->mb.pic.i_stride[1], height ); + + if( h->sh.weight[0][1].weightfn ) + h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE, + h->mb.pic.p_fdec[1], FDEC_STRIDE, + &h->sh.weight[0][1], height ); + if( h->sh.weight[0][2].weightfn ) + h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE, + h->mb.pic.p_fdec[2], FDEC_STRIDE, + &h->sh.weight[0][2], height ); + } + } + + x264_macroblock_encode_skip( h ); return; } if( h->mb.i_type == B_SKIP ) @@ -502,46 +644,38 @@ void x264_macroblock_encode( x264_t *h ) if( h->mb.i_type == I_16x16 ) { - const int i_mode = h->mb.i_intra16x16_pred_mode; h->mb.b_transform_8x8 = 0; - if( h->mb.b_lossless ) - x264_predict_lossless_16x16( h, i_mode ); - else - h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] ); - - /* encode the 16x16 macroblock */ - x264_mb_encode_i16x16( h, i_qp ); + for( int p = 0; p < plane_count; p++ ) + { + x264_mb_encode_i16x16( h, p, i_qp ); + i_qp = h->mb.i_chroma_qp; + } } else if( h->mb.i_type == I_8x8 ) { - DECLARE_ALIGNED_16( uint8_t edge[33] ); h->mb.b_transform_8x8 = 1; /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */ if( h->mb.i_skip_intra ) { h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 ); - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = h->mb.pic.i8x8_nnz_buf[0]; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = h->mb.pic.i8x8_nnz_buf[1]; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = h->mb.pic.i8x8_nnz_buf[2]; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = h->mb.pic.i8x8_nnz_buf[3]; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0]; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1]; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2]; + M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3]; h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp; /* In RD mode, restore the now-overwritten DCT data. */ if( h->mb.i_skip_intra == 2 ) h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) ); } - for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ ) + for( int p = 0; p < plane_count; p++ ) { - uint8_t *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE]; - int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]]; - x264_predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] ); - - if( h->mb.b_lossless ) - x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge ); - else - h->predict_8x8[i_mode]( p_dst, edge ); - - x264_mb_encode_i8x8( h, i, i_qp ); + for( int i = (p == 0 && h->mb.i_skip_intra) ? 
3 : 0 ; i < 4; i++ ) + { + int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]]; + x264_mb_encode_i8x8( h, p, i, i_qp, i_mode, NULL, 1 ); + } + i_qp = h->mb.i_chroma_qp; } } else if( h->mb.i_type == I_4x4 ) @@ -551,34 +685,33 @@ void x264_macroblock_encode( x264_t *h ) if( h->mb.i_skip_intra ) { h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 ); - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = h->mb.pic.i4x4_nnz_buf[0]; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = h->mb.pic.i4x4_nnz_buf[1]; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = h->mb.pic.i4x4_nnz_buf[2]; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = h->mb.pic.i4x4_nnz_buf[3]; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0]; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1]; + M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2]; + M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3]; h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp; /* In RD mode, restore the now-overwritten DCT data. */ if( h->mb.i_skip_intra == 2 ) h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) ); } - for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ ) + for( int p = 0; p < plane_count; p++ ) { - uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]]; - int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]]; + for( int i = (p == 0 && h->mb.i_skip_intra) ? 15 : 0 ; i < 16; i++ ) + { + pixel *p_dst = &h->mb.pic.p_fdec[p][block_idx_xy_fdec[i]]; + int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]]; - if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP ) - /* emulate missing topright samples */ - *(uint32_t*) &p_dst[4-FDEC_STRIDE] = p_dst[3-FDEC_STRIDE] * 0x01010101U; + if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP ) + /* emulate missing topright samples */ + MPIXEL_X4( &p_dst[4-FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst[3-FDEC_STRIDE] ); - if( h->mb.b_lossless ) - x264_predict_lossless_4x4( h, p_dst, i, i_mode ); - else - h->predict_4x4[i_mode]( p_dst ); - x264_mb_encode_i4x4( h, i, i_qp ); + x264_mb_encode_i4x4( h, p, i, i_qp, i_mode, 1 ); + } + i_qp = h->mb.i_chroma_qp; } } else /* Inter MB */ { - int i8x8, i4x4; int i_decimate_mb = 0; /* Don't repeat motion compensation if it was already done in non-RD transform analysis */ @@ -588,171 +721,182 @@ void x264_macroblock_encode( x264_t *h ) if( h->mb.b_lossless ) { if( h->mb.b_transform_8x8 ) - for( i8x8 = 0; i8x8 < 4; i8x8++ ) - { - int x = 8*(i8x8&1); - int y = 8*(i8x8>>1); - h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8], - h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE, - h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE ); - nz = array_non_zero( h->dct.luma8x8[i8x8] ); - STORE_8x8_NNZ(i8x8,nz); - h->mb.i_cbp_luma |= nz << i8x8; - } + for( int p = 0; p < plane_count; p++ ) + for( int i8x8 = 0; i8x8 < 4; i8x8++ ) + { + int x = i8x8&1; + int y = i8x8>>1; + nz = h->zigzagf.sub_8x8( h->dct.luma8x8[p*4+i8x8], h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE, + h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE ); + STORE_8x8_NNZ( p, i8x8, nz ); + h->mb.i_cbp_luma |= nz << i8x8; + } else - for( i4x4 = 0; i4x4 < 16; i4x4++ ) - { - h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4], - h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4], - h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] ); - nz = array_non_zero( h->dct.luma4x4[i4x4] ); - h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz; - 
h->mb.i_cbp_luma |= nz << (i4x4>>2); - } + for( int p = 0; p < plane_count; p++ ) + for( int i4x4 = 0; i4x4 < 16; i4x4++ ) + { + nz = h->zigzagf.sub_4x4( h->dct.luma4x4[p*16+i4x4], + h->mb.pic.p_fenc[p]+block_idx_xy_fenc[i4x4], + h->mb.pic.p_fdec[p]+block_idx_xy_fdec[i4x4] ); + h->mb.cache.non_zero_count[x264_scan8[p*16+i4x4]] = nz; + h->mb.i_cbp_luma |= nz << (i4x4>>2); + } } else if( h->mb.b_transform_8x8 ) { - DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] ); - b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation - h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] ); - h->nr_count[1] += h->mb.b_noise_reduction * 4; + ALIGNED_ARRAY_16( dctcoef, dct8x8,[4],[64] ); + b_decimate &= !h->mb.b_trellis || !h->param.b_cabac; // 8x8 trellis is inherently optimal decimation for CABAC - for( idx = 0; idx < 4; idx++ ) + for( int p = 0; p < plane_count; p++ ) { - if( h->mb.b_noise_reduction ) - h->quantf.denoise_dct( *dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 ); - nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx ); + h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[p], h->mb.pic.p_fdec[p] ); + h->nr_count[1+!!p*2] += h->mb.b_noise_reduction * 4; - if( nz ) + int plane_cbp = 0; + for( int idx = 0; idx < 4; idx++ ) { - h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] ); - if( b_decimate ) + nz = x264_quant_8x8( h, dct8x8[idx], i_qp, ctx_cat_plane[DCT_LUMA_8x8][p], 0, p, idx ); + + if( nz ) { - int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] ); - i_decimate_mb += i_decimate_8x8; - if( i_decimate_8x8 >= 4 ) - h->mb.i_cbp_luma |= 1<zigzagf.scan_8x8( h->dct.luma8x8[p*4+idx], dct8x8[idx] ); + if( b_decimate ) + { + int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[p*4+idx] ); + i_decimate_mb += i_decimate_8x8; + if( i_decimate_8x8 >= 4 ) + plane_cbp |= 1<mb.i_cbp_luma |= 1<mb.i_cbp_luma = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0; - } - else - { - for( idx = 0; idx < 4; idx++ ) + if( i_decimate_mb < 6 && b_decimate ) + { + plane_cbp = 0; + CLEAR_16x16_NNZ( p ); + } + else { - if( h->mb.i_cbp_luma&(1<quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp ); - h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] ); - STORE_8x8_NNZ(idx,1); + int x = idx&1; + int y = idx>>1; + + if( plane_cbp&(1<quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[p?CQM_8PC:CQM_8PY], i_qp ); + h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[p][8*x + 8*y*FDEC_STRIDE], dct8x8[idx] ); + STORE_8x8_NNZ( p, idx, 1 ); + } + else + STORE_8x8_NNZ( p, idx, 0 ); } - else - STORE_8x8_NNZ(idx,0); } + h->mb.i_cbp_luma |= plane_cbp; + i_qp = h->mb.i_chroma_qp; } } else { - DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] ); - h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] ); - h->nr_count[0] += h->mb.b_noise_reduction * 16; - - for( i8x8 = 0; i8x8 < 4; i8x8++ ) + ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] ); + for( int p = 0; p < plane_count; p++ ) { - int i_decimate_8x8 = 0; - int cbp = 0; + h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[p], h->mb.pic.p_fdec[p] ); + h->nr_count[0+!!p*2] += h->mb.b_noise_reduction * 16; - /* encode one 4x4 block */ - for( i4x4 = 0; i4x4 < 4; i4x4++ ) + int plane_cbp = 0; + for( int i8x8 = 0; i8x8 < 4; i8x8++ ) { - idx = i8x8 * 4 + i4x4; + int 
i_decimate_8x8 = 0; + int cbp = 0; - if( h->mb.b_noise_reduction ) - h->quantf.denoise_dct( *dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 ); - nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx ); - h->mb.cache.non_zero_count[x264_scan8[idx]] = nz; + /* encode one 4x4 block */ + for( int i4x4 = 0; i4x4 < 4; i4x4++ ) + { + int idx = i8x8 * 4 + i4x4; + + nz = x264_quant_4x4( h, dct4x4[idx], i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, p, idx ); + h->mb.cache.non_zero_count[x264_scan8[p*16+idx]] = nz; + + if( nz ) + { + h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+idx], dct4x4[idx] ); + h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[p?CQM_4PC:CQM_4PY], i_qp ); + if( b_decimate && i_decimate_8x8 < 6 ) + i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+idx] ); + cbp = 1; + } + } - if( nz ) + int x = i8x8&1; + int y = i8x8>>1; + + /* decimate this 8x8 block */ + i_decimate_mb += i_decimate_8x8; + if( b_decimate ) + { + if( i_decimate_8x8 < 4 ) + STORE_8x8_NNZ( p, i8x8, 0 ); + else + plane_cbp |= 1<zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] ); - h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp ); - if( b_decimate && i_decimate_8x8 < 6 ) - i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] ); - cbp = 1; + h->dctf.add8x8_idct( &h->mb.pic.p_fdec[p][8*x + 8*y*FDEC_STRIDE], &dct4x4[i8x8*4] ); + plane_cbp |= 1<mb.i_cbp_luma |= 1<dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] ); - h->mb.i_cbp_luma |= 1<mb.i_cbp_luma = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0; - *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0; - } - else - { - for( i8x8 = 0; i8x8 < 4; i8x8++ ) - if( h->mb.i_cbp_luma&(1<dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] ); + { + for( int i8x8 = 0; i8x8 < 4; i8x8++ ) + if( plane_cbp&(1<dctf.add8x8_idct( &h->mb.pic.p_fdec[p][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] ); + } } + h->mb.i_cbp_luma |= plane_cbp; + i_qp = h->mb.i_chroma_qp; } } } /* encode chroma */ - if( IS_INTRA( h->mb.i_type ) ) + if( chroma ) { - const int i_mode = h->mb.i_chroma_pred_mode; - if( h->mb.b_lossless ) - x264_predict_lossless_8x8_chroma( h, i_mode ); - else + if( IS_INTRA( h->mb.i_type ) ) { - h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] ); - h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] ); + int i_mode = h->mb.i_chroma_pred_mode; + if( h->mb.b_lossless ) + x264_predict_lossless_chroma( h, i_mode ); + else + { + h->predict_chroma[i_mode]( h->mb.pic.p_fdec[1] ); + h->predict_chroma[i_mode]( h->mb.pic.p_fdec[2] ); + } } - } - - /* encode the 8x8 blocks */ - x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp ); - if( h->param.b_cabac ) - { - i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]] - | h->mb.cache.non_zero_count[x264_scan8[25]] << 1 - | h->mb.cache.non_zero_count[x264_scan8[26]] << 2; + /* encode the 8x8 blocks */ + x264_mb_encode_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp ); } + else + h->mb.i_cbp_chroma = 0; /* store cbp */ - h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma; + int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma; + if( h->param.b_cabac ) + cbp |= h->mb.cache.non_zero_count[x264_scan8[LUMA_DC ]] << 8 + | h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+0]] << 9 + | 
h->mb.cache.non_zero_count[x264_scan8[CHROMA_DC+1]] << 10; + h->mb.cbp[h->mb.i_mb_xy] = cbp; /* Check for P_SKIP * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account @@ -761,7 +905,7 @@ void x264_macroblock_encode( x264_t *h ) { if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) && - *(uint32_t*)h->mb.cache.mv[0][x264_scan8[0]] == *(uint32_t*)h->mb.cache.pskip_mv + M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv ) && h->mb.cache.ref[0][x264_scan8[0]] == 0 ) { h->mb.i_type = P_SKIP; @@ -775,92 +919,146 @@ void x264_macroblock_encode( x264_t *h ) } } +void x264_macroblock_encode( x264_t *h ) +{ + if( CHROMA444 ) + x264_macroblock_encode_internal( h, 3, 0 ); + else + x264_macroblock_encode_internal( h, 1, 1 ); +} + /***************************************************************************** * x264_macroblock_probe_skip: - * Check if the current MB could be encoded as a [PB]_SKIP (it supposes you use - * the previous QP + * Check if the current MB could be encoded as a [PB]_SKIP *****************************************************************************/ -int x264_macroblock_probe_skip( x264_t *h, int b_bidir ) +static ALWAYS_INLINE int x264_macroblock_probe_skip_internal( x264_t *h, int b_bidir, int plane_count, int chroma ) { - DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] ); - DECLARE_ALIGNED_16( int16_t dct2x2[2][2] ); - DECLARE_ALIGNED_16( int16_t dctscan[16] ); - + ALIGNED_ARRAY_16( dctcoef, dct4x4,[8],[16] ); + ALIGNED_ARRAY_16( dctcoef, dctscan,[16] ); + ALIGNED_4( int16_t mvp[2] ); int i_qp = h->mb.i_qp; - int mvp[2]; - int ch, thresh; - - int i8x8, i4x4; - int i_decimate_mb; - if( !b_bidir ) + for( int p = 0; p < plane_count; p++ ) { - /* Get the MV */ - mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] ); - mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] ); - - /* Motion compensation */ - h->mc.mc_luma( h->mb.pic.p_fdec[0], FDEC_STRIDE, - h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0], - mvp[0], mvp[1], 16, 16 ); - } + int quant_cat = p ? 
CQM_4PC : CQM_4PY; + if( !b_bidir ) + { + /* Get the MV */ + mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] ); + mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] ); + + /* Motion compensation */ + h->mc.mc_luma( h->mb.pic.p_fdec[p], FDEC_STRIDE, + &h->mb.pic.p_fref[0][0][p*4], h->mb.pic.i_stride[p], + mvp[0], mvp[1], 16, 16, &h->sh.weight[0][p] ); + } - for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ ) - { - int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8; - int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8; - /* get luma diff */ - h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset, - h->mb.pic.p_fdec[0] + fdec_offset ); - /* encode one 4x4 block */ - for( i4x4 = 0; i4x4 < 4; i4x4++ ) + for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ ) { - if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) ) - continue; - h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] ); - i_decimate_mb += h->quantf.decimate_score16( dctscan ); - if( i_decimate_mb >= 6 ) - return 0; + int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8; + int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8; + /* get luma diff */ + h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[p] + fenc_offset, + h->mb.pic.p_fdec[p] + fdec_offset ); + /* encode one 4x4 block */ + for( int i4x4 = 0; i4x4 < 4; i4x4++ ) + { + if( h->mb.b_noise_reduction ) + h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[0+!!p*2], h->nr_offset[0+!!p*2], 16 ); + if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[quant_cat][i_qp], h->quant4_bias[quant_cat][i_qp] ) ) + continue; + h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] ); + i_decimate_mb += h->quantf.decimate_score16( dctscan ); + if( i_decimate_mb >= 6 ) + return 0; + } } + i_qp = h->mb.i_chroma_qp; } - /* encode chroma */ - i_qp = h->mb.i_chroma_qp; - thresh = (x264_lambda2_tab[i_qp] + 32) >> 6; - - for( ch = 0; ch < 2; ch++ ) + if( chroma == CHROMA_420 || chroma == CHROMA_422 ) { - uint8_t *p_src = h->mb.pic.p_fenc[1+ch]; - uint8_t *p_dst = h->mb.pic.p_fdec[1+ch]; + i_qp = h->mb.i_chroma_qp; + int chroma422 = chroma == CHROMA_422; + int thresh = chroma422 ? (x264_lambda2_tab[i_qp] + 16) >> 5 : (x264_lambda2_tab[i_qp] + 32) >> 6; + int ssd; + ALIGNED_ARRAY_16( dctcoef, dct_dc,[8] ); if( !b_bidir ) { - h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE, - h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch], - mvp[0], mvp[1], 8, 8 ); + /* Special case for mv0, which is (of course) very common in P-skip mode. */ + if( M32( mvp ) ) + h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE, + h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1], + mvp[0], mvp[1]<mc.load_deinterleave_chroma_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], + h->mb.pic.i_stride[1], chroma422?16:8 ); } - /* there is almost never a termination during chroma, but we can't avoid the check entirely */ - /* so instead we check SSD and skip the actual check if the score is low enough. 
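 * Roughly: thresh comes from the RD lambda for this QP, the same
 * expression used by the early termination in x264_mb_encode_chroma:
 *
 *     int thresh = chroma422 ? (x264_lambda2_tab[i_qp] + 16) >> 5
 *                            : (x264_lambda2_tab[i_qp] + 32) >> 6;
 *
 * An SSD below it is assumed to quantize to all-zero coefficients.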
*/ - if( h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) < thresh ) - continue; + for( int ch = 0; ch < 2; ch++ ) + { + pixel *p_src = h->mb.pic.p_fenc[1+ch]; + pixel *p_dst = h->mb.pic.p_fdec[1+ch]; + + if( !b_bidir && h->sh.weight[0][1+ch].weightfn ) + h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE, + h->mb.pic.p_fdec[1+ch], FDEC_STRIDE, + &h->sh.weight[0][1+ch], chroma422?16:8 ); + + /* there is almost never a termination during chroma, but we can't avoid the check entirely */ + /* so instead we check SSD and skip the actual check if the score is low enough. */ + ssd = h->pixf.ssd[chroma422?PIXEL_8x16:PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ); + if( ssd < thresh ) + continue; + + /* The vast majority of chroma checks will terminate during the DC check or the higher + * threshold check, so we can save time by doing a DC-only DCT. */ + if( h->mb.b_noise_reduction ) + { + for( int i = 0; i <= chroma422; i++ ) + h->dctf.sub8x8_dct( &dct4x4[4*i], p_src + 8*i*FENC_STRIDE, p_dst + 8*i*FDEC_STRIDE ); - h->dctf.sub8x8_dct( dct4x4, p_src, p_dst ); + for( int i4x4 = 0; i4x4 < (chroma422?8:4); i4x4++ ) + { + h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 ); + dct_dc[i4x4] = dct4x4[i4x4][0]; + } + } + else + { + if( chroma422 ) + h->dctf.sub8x16_dct_dc( dct_dc, p_src, p_dst ); + else + h->dctf.sub8x8_dct_dc( dct_dc, p_src, p_dst ); + } - /* calculate dct DC */ - dct2x2dc( dct2x2, dct4x4 ); - if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) ) - return 0; + for( int i = 0; i <= chroma422; i++ ) + if( h->quantf.quant_2x2_dc( &dct_dc[4*i], h->quant4_mf[CQM_4PC][i_qp+3*chroma422][0] >> 1, + h->quant4_bias[CQM_4PC][i_qp+3*chroma422][0] << 1 ) ) + return 0; - /* calculate dct coeffs */ - for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ ) - { - if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) ) + /* If there wasn't a termination in DC, we can check against a much higher threshold. 
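 * (The 4x factor mirrors the score < thresh*4 cutoff in the variance-based
 * early termination of x264_mb_encode_chroma: once the DC check has
 * passed, only a residual with roughly four times the energy is worth a
 * full AC re-check.)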
*/ + if( ssd < thresh*4 ) continue; - h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] ); - i_decimate_mb += h->quantf.decimate_score15( dctscan ); - if( i_decimate_mb >= 7 ) - return 0; + + if( !h->mb.b_noise_reduction ) + for( int i = 0; i <= chroma422; i++ ) + h->dctf.sub8x8_dct( &dct4x4[4*i], p_src + 8*i*FENC_STRIDE, p_dst + 8*i*FDEC_STRIDE ); + + /* calculate dct coeffs */ + for( int i4x4 = 0, i_decimate_mb = 0; i4x4 < (chroma422?8:4); i4x4++ ) + { + dct4x4[i4x4][0] = 0; + if( h->mb.b_noise_reduction ) + h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 ); + if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) ) + continue; + h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] ); + i_decimate_mb += h->quantf.decimate_score15( dctscan ); + if( i_decimate_mb >= 7 ) + return 0; + } } } @@ -868,6 +1066,16 @@ int x264_macroblock_probe_skip( x264_t *h, int b_bidir ) return 1; } +int x264_macroblock_probe_skip( x264_t *h, int b_bidir ) +{ + if( CHROMA_FORMAT == CHROMA_444 ) + return x264_macroblock_probe_skip_internal( h, b_bidir, 3, CHROMA_444 ); + else if( CHROMA_FORMAT == CHROMA_422 ) + return x264_macroblock_probe_skip_internal( h, b_bidir, 1, CHROMA_422 ); + else + return x264_macroblock_probe_skip_internal( h, b_bidir, 1, CHROMA_420 ); +} + /**************************************************************************** * DCT-domain noise reduction / adaptive deadzone * from libavcodec @@ -875,24 +1083,30 @@ int x264_macroblock_probe_skip( x264_t *h, int b_bidir ) void x264_noise_reduction_update( x264_t *h ) { - int cat, i; - for( cat = 0; cat < 2; cat++ ) + h->nr_offset = h->nr_offset_denoise; + h->nr_residual_sum = h->nr_residual_sum_buf[0]; + h->nr_count = h->nr_count_buf[0]; + for( int cat = 0; cat < 3 + CHROMA444; cat++ ) { - int size = cat ? 64 : 16; - const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab; + int dct8x8 = cat&1; + int size = dct8x8 ? 64 : 16; + const uint32_t *weight = dct8x8 ? x264_dct8_weight2_tab : x264_dct4_weight2_tab; - if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) ) + if( h->nr_count[cat] > (dct8x8 ? (1<<16) : (1<<18)) ) { - for( i = 0; i < size; i++ ) + for( int i = 0; i < size; i++ ) h->nr_residual_sum[cat][i] >>= 1; h->nr_count[cat] >>= 1; } - for( i = 0; i < size; i++ ) + for( int i = 0; i < size; i++ ) h->nr_offset[cat][i] = ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat] + h->nr_residual_sum[cat][i]/2) / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1); + + /* Don't denoise DC coefficients */ + h->nr_offset[cat][0] = 0; } } @@ -900,164 +1114,224 @@ void x264_noise_reduction_update( x264_t *h ) * RD only; 4 calls to this do not make up for one macroblock_encode. * doesn't transform chroma dc. 
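 * (Here "doesn't transform" means the chroma DC Hadamard is skipped
 * entirely: each 4x4 chroma DC is zeroed or discarded below, e.g.
 * dct4x4[i4x4][0] = 0.)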
*****************************************************************************/ -void x264_macroblock_encode_p8x8( x264_t *h, int i8 ) +static ALWAYS_INLINE void x264_macroblock_encode_p8x8_internal( x264_t *h, int i8, int plane_count, int chroma ) { + int b_decimate = h->mb.b_dct_decimate; int i_qp = h->mb.i_qp; - uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE; - uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE; - int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate; - int nnz8x8 = 0; - int ch, nz; + int x = i8&1; + int y = i8>>1; + int nz; + int chroma422 = chroma == CHROMA_422; + + h->mb.i_cbp_chroma = 0; + h->mb.i_cbp_luma &= ~(1 << i8); - x264_mb_mc_8x8( h, i8 ); + if( !h->mb.b_skip_mc ) + x264_mb_mc_8x8( h, i8 ); if( h->mb.b_lossless ) { - int i4; - if( h->mb.b_transform_8x8 ) + for( int p = 0; p < plane_count; p++ ) { - h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec ); - nnz8x8 = array_non_zero( h->dct.luma8x8[i8] ); - STORE_8x8_NNZ(i8,nnz8x8); - } - else - { - for( i4 = i8*4; i4 < i8*4+4; i4++ ) + pixel *p_fenc = h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE; + pixel *p_fdec = h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE; + int nnz8x8 = 0; + if( h->mb.b_transform_8x8 ) { - int nz; - h->zigzagf.sub_4x4( h->dct.luma4x4[i4], - h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4], - h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] ); - nz = array_non_zero( h->dct.luma4x4[i4] ); - h->mb.cache.non_zero_count[x264_scan8[i4]] = nz; - nnz8x8 |= nz; + nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[4*p+i8], p_fenc, p_fdec ); + STORE_8x8_NNZ( p, i8, nnz8x8 ); } + else + { + for( int i4 = i8*4; i4 < i8*4+4; i4++ ) + { + nz = h->zigzagf.sub_4x4( h->dct.luma4x4[16*p+i4], + h->mb.pic.p_fenc[p]+block_idx_xy_fenc[i4], + h->mb.pic.p_fdec[p]+block_idx_xy_fdec[i4] ); + h->mb.cache.non_zero_count[x264_scan8[16*p+i4]] = nz; + nnz8x8 |= nz; + } + } + h->mb.i_cbp_luma |= nnz8x8 << i8; } - for( ch = 0; ch < 2; ch++ ) + if( chroma == CHROMA_420 || chroma == CHROMA_422 ) { - p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE; - p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE; - h->zigzagf.sub_4x4( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec ); - h->dct.luma4x4[16+i8+ch*4][0] = 0; - h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = array_non_zero( h->dct.luma4x4[16+i8+ch*4] ); + for( int ch = 0; ch < 2; ch++ ) + { + dctcoef dc; + pixel *p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + (chroma422?8:4)*y*FENC_STRIDE; + pixel *p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + (chroma422?8:4)*y*FDEC_STRIDE; + + for( int i4x4 = 0; i4x4 <= chroma422; i4x4++ ) + { + int offset = chroma422 ? 8*y + 2*i4x4 + x : i8; + nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+offset+ch*16], p_fenc+4*i4x4*FENC_STRIDE, p_fdec+4*i4x4*FDEC_STRIDE, &dc ); + h->mb.cache.non_zero_count[x264_scan8[16+offset+ch*16]] = nz; + } + } + h->mb.i_cbp_chroma = 0x02; } } else { if( h->mb.b_transform_8x8 ) { - DECLARE_ALIGNED_16( int16_t dct8x8[8][8] ); - h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec ); - nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 ); - if( nnz8x8 ) + for( int p = 0; p < plane_count; p++ ) { - h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 ); - - if( b_decimate && !h->mb.b_trellis ) - nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] ); - + int quant_cat = p ? 
CQM_8PC : CQM_8PY; + pixel *p_fenc = h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE; + pixel *p_fdec = h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE; + ALIGNED_ARRAY_16( dctcoef, dct8x8,[64] ); + h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec ); + int nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, ctx_cat_plane[DCT_LUMA_8x8][p], 0, p, i8 ); if( nnz8x8 ) { - h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp ); - h->dctf.add8x8_idct8( p_fdec, dct8x8 ); - STORE_8x8_NNZ(i8,1); + h->zigzagf.scan_8x8( h->dct.luma8x8[4*p+i8], dct8x8 ); + + if( b_decimate && !h->mb.b_trellis ) + nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[4*p+i8] ); + + if( nnz8x8 ) + { + h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[quant_cat], i_qp ); + h->dctf.add8x8_idct8( p_fdec, dct8x8 ); + STORE_8x8_NNZ( p, i8, 1 ); + } + else + STORE_8x8_NNZ( p, i8, 0 ); } else - STORE_8x8_NNZ(i8,0); + STORE_8x8_NNZ( p, i8, 0 ); + h->mb.i_cbp_luma |= nnz8x8 << i8; + i_qp = h->mb.i_chroma_qp; } - else - STORE_8x8_NNZ(i8,0); } else { - int i4; - int i_decimate_8x8 = 0; - DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] ); - h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec ); - for( i4 = 0; i4 < 4; i4++ ) + for( int p = 0; p < plane_count; p++ ) { - nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 ); - h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz; - if( nz ) + int quant_cat = p ? CQM_4PC : CQM_4PY; + pixel *p_fenc = h->mb.pic.p_fenc[p] + 8*x + 8*y*FENC_STRIDE; + pixel *p_fdec = h->mb.pic.p_fdec[p] + 8*x + 8*y*FDEC_STRIDE; + int i_decimate_8x8 = 0, nnz8x8 = 0; + ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] ); + h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec ); + for( int i4 = 0; i4 < 4; i4++ ) { - h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] ); - h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp ); - if( b_decimate ) - i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] ); - nnz8x8 = 1; + nz = x264_quant_4x4( h, dct4x4[i4], i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, p, i8*4+i4 ); + h->mb.cache.non_zero_count[x264_scan8[p*16+i8*4+i4]] = nz; + if( nz ) + { + h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+i8*4+i4], dct4x4[i4] ); + h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[quant_cat], i_qp ); + if( b_decimate ) + i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[p*16+i8*4+i4] ); + nnz8x8 = 1; + } } - } - if( b_decimate && i_decimate_8x8 < 4 ) - nnz8x8 = 0; + if( b_decimate && i_decimate_8x8 < 4 ) + nnz8x8 = 0; - if( nnz8x8 ) - h->dctf.add8x8_idct( p_fdec, dct4x4 ); - else - STORE_8x8_NNZ(i8,0); - } + if( nnz8x8 ) + h->dctf.add8x8_idct( p_fdec, dct4x4 ); + else + STORE_8x8_NNZ( p, i8, 0 ); - i_qp = h->mb.i_chroma_qp; + h->mb.i_cbp_luma |= nnz8x8 << i8; + i_qp = h->mb.i_chroma_qp; + } + } - for( ch = 0; ch < 2; ch++ ) + if( chroma == CHROMA_420 || chroma == CHROMA_422 ) { - DECLARE_ALIGNED_16( int16_t dct4x4[4][4] ); - p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE; - p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE; + i_qp = h->mb.i_chroma_qp; + for( int ch = 0; ch < 2; ch++ ) + { + ALIGNED_ARRAY_16( dctcoef, dct4x4,[2],[16] ); + pixel *p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + (chroma422?8:4)*y*FENC_STRIDE; + pixel *p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + (chroma422?8:4)*y*FDEC_STRIDE; - h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec ); - dct4x4[0][0] = 0; + for( int i4x4 = 0; i4x4 <= chroma422; i4x4++ ) + { + h->dctf.sub4x4_dct( dct4x4[i4x4], p_fenc + 4*i4x4*FENC_STRIDE, p_fdec + 4*i4x4*FDEC_STRIDE ); - if( h->mb.b_trellis ) - nz = 
x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 0 ); - else - nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ); + if( h->mb.b_noise_reduction ) + h->quantf.denoise_dct( dct4x4[i4x4], h->nr_residual_sum[2], h->nr_offset[2], 16 ); + dct4x4[i4x4][0] = 0; - h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz; - if( nz ) - { - h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 ); - h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp ); - h->dctf.add4x4_idct( p_fdec, dct4x4 ); + if( h->mb.b_trellis ) + nz = x264_quant_4x4_trellis( h, dct4x4[i4x4], CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 ); + else + nz = h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ); + + int offset = chroma422 ? ((5*i8) & 0x09) + 2*i4x4 : i8; + h->mb.cache.non_zero_count[x264_scan8[16+offset+ch*16]] = nz; + if( nz ) + { + h->zigzagf.scan_4x4( h->dct.luma4x4[16+offset+ch*16], dct4x4[i4x4] ); + h->quantf.dequant_4x4( dct4x4[i4x4], h->dequant4_mf[CQM_4PC], i_qp ); + h->dctf.add4x4_idct( p_fdec + 4*i4x4*FDEC_STRIDE, dct4x4[i4x4] ); + } + } } + h->mb.i_cbp_chroma = 0x02; } } - h->mb.i_cbp_luma &= ~(1 << i8); - h->mb.i_cbp_luma |= nnz8x8 << i8; - h->mb.i_cbp_chroma = 0x02; +} + +void x264_macroblock_encode_p8x8( x264_t *h, int i8 ) +{ + if( CHROMA444 ) + x264_macroblock_encode_p8x8_internal( h, i8, 3, CHROMA_444 ); + else if( CHROMA_FORMAT == CHROMA_422 ) + x264_macroblock_encode_p8x8_internal( h, i8, 1, CHROMA_422 ); + else + x264_macroblock_encode_p8x8_internal( h, i8, 1, CHROMA_420 ); } /***************************************************************************** - * RD only, luma only + * RD only, luma only (for 4:2:0) *****************************************************************************/ -void x264_macroblock_encode_p4x4( x264_t *h, int i4 ) +static ALWAYS_INLINE void x264_macroblock_encode_p4x4_internal( x264_t *h, int i4, int plane_count ) { int i_qp = h->mb.i_qp; - uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]]; - uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]]; - const int i_ref = h->mb.cache.ref[0][x264_scan8[i4]]; - const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][0], h->mb.mv_min[0], h->mb.mv_max[0] ); - const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][1], h->mb.mv_min[1], h->mb.mv_max[1] ); - int nz; - - h->mc.mc_luma( p_fdec, FDEC_STRIDE, h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], mvx + 4*4*block_idx_x[i4], mvy + 4*4*block_idx_y[i4], 4, 4 ); - if( h->mb.b_lossless ) - { - h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec ); - h->mb.cache.non_zero_count[x264_scan8[i4]] = array_non_zero( h->dct.luma4x4[i4] ); - } - else + for( int p = 0; p < plane_count; p++ ) { - DECLARE_ALIGNED_16( int16_t dct4x4[4][4] ); - h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec ); - nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 ); - h->mb.cache.non_zero_count[x264_scan8[i4]] = nz; - if( nz ) + int quant_cat = p ? CQM_4PC : CQM_4PY; + pixel *p_fenc = &h->mb.pic.p_fenc[p][block_idx_xy_fenc[i4]]; + pixel *p_fdec = &h->mb.pic.p_fdec[p][block_idx_xy_fdec[i4]]; + int nz; + + /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. 
*/ + + if( h->mb.b_lossless ) { - h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 ); - h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp ); - h->dctf.add4x4_idct( p_fdec, dct4x4 ); + nz = h->zigzagf.sub_4x4( h->dct.luma4x4[p*16+i4], p_fenc, p_fdec ); + h->mb.cache.non_zero_count[x264_scan8[p*16+i4]] = nz; } + else + { + ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] ); + h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec ); + nz = x264_quant_4x4( h, dct4x4, i_qp, ctx_cat_plane[DCT_LUMA_4x4][p], 0, p, i4 ); + h->mb.cache.non_zero_count[x264_scan8[p*16+i4]] = nz; + if( nz ) + { + h->zigzagf.scan_4x4( h->dct.luma4x4[p*16+i4], dct4x4 ); + h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[quant_cat], i_qp ); + h->dctf.add4x4_idct( p_fdec, dct4x4 ); + } + } + i_qp = h->mb.i_chroma_qp; } } + +void x264_macroblock_encode_p4x4( x264_t *h, int i8 ) +{ + if( CHROMA444 ) + x264_macroblock_encode_p4x4_internal( h, i8, 3 ); + else + x264_macroblock_encode_p4x4_internal( h, i8, 1 ); +}
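

The adaptive deadzone maintained by x264_noise_reduction_update above works from two running statistics per DCT category: a block count (nr_count) and a per-coefficient sum of residual magnitudes (nr_residual_sum); the >>1 halving once the count passes a threshold acts as an exponential-decay window, so the statistics track the recent source rather than the whole stream. The offset formula then enlarges the deadzone for coefficient positions whose accumulated energy is small relative to the block count, i.e. positions that are statistically noise. A minimal standalone sketch of the same update rule — the names and plain integer types here are illustrative, not the encoder's real interfaces:

#include <stdint.h>

/* Recompute per-coefficient deadzone offsets from running statistics.
 * 'strength' plays the role of h->param.analyse.i_noise_reduction,
 * 'sum[i]' of h->nr_residual_sum[cat][i], 'count' of h->nr_count[cat],
 * and 'weight[i]' of the dct4/dct8 weight tables. */
void update_deadzone( uint32_t *offset, const uint64_t *sum,
                      uint64_t count, const uint32_t *weight,
                      int size, int strength )
{
    for( int i = 0; i < size; i++ )
        offset[i] = ( (uint64_t)strength * count + sum[i]/2 )
                  / ( sum[i] * weight[i]/256 + 1 );
    /* Don't denoise DC coefficients, matching the code above. */
    offset[0] = 0;
}

The +1 in the denominator guards the empty-statistics case: with sum[i] == 0 the offset degenerates to strength * count, so coefficient positions that are essentially never coded get the most aggressive denoising.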
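

Both 4:2:2 chroma paths above compute the same per-plane block offset in two spellings: the lossless path writes 8*y + 2*i4x4 + x, the lossy path ((5*i8) & 0x09) + 2*i4x4. For i8 in 0..3 these agree, because (5*i8) & 0x09 is a branchless form of 8*(i8>>1) + (i8&1): multiplying by 5 copies i8 into both the low and high bit pairs of a 4-bit value, and the mask keeps bit 0 (the x coordinate) and bit 3 (y, pre-scaled by 8), yielding the bases 0, 1, 8, 9; the +2*i4x4 term then steps to the vertically adjacent 4x4 block of each partition's 4x8 chroma region. A small self-contained check of that identity (the interpretation of 0/1/8/9 as scan8 row bases is an assumption — the scan8 table itself is not part of this hunk):

#include <assert.h>
#include <stdio.h>

int main( void )
{
    for( int i8 = 0; i8 < 4; i8++ )
    {
        int x = i8 & 1, y = i8 >> 1;
        int base = (5*i8) & 0x09;               /* lossy-path form */
        assert( base == 8*y + x );              /* lossless-path form */
        for( int i4x4 = 0; i4x4 < 2; i4x4++ )   /* two stacked 4x4 blocks */
            printf( "i8=%d i4x4=%d -> chroma block offset %d\n",
                    i8, i4x4, base + 2*i4x4 );
    }
    return 0;
}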
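

Every public entry point in this hunk — x264_macroblock_probe_skip, x264_macroblock_encode_p8x8, x264_macroblock_encode_p4x4 — now follows the same specialization pattern: the body moves into a static ALWAYS_INLINE *_internal function that takes plane_count and the chroma format as extra parameters, and the thin public wrapper calls it once per format with compile-time constants. Since every call site passes constants, the inliner can fold the chroma == CHROMA_422 tests and the plane-loop bounds, producing one tuned copy of the code per format with no per-block format branching. A toy sketch of the idiom — all names are illustrative, and ALWAYS_INLINE is spelled out here as the GCC attribute it typically expands to:

enum { FMT_420, FMT_422, FMT_444 };

/* The big internal body: 'fmt' and 'plane_count' are constant at every
 * call site, so each wrapper below compiles to a dedicated version with
 * the format tests resolved at compile time. */
static inline __attribute__((always_inline))
int encode_internal( int blocks, int plane_count, int fmt )
{
    int coded = 0;
    for( int p = 0; p < plane_count; p++ )  /* bound is a constant here */
        coded += blocks;
    if( fmt == FMT_422 )                    /* folded away per wrapper */
        coded *= 2;                         /* twice the chroma rows */
    return coded;
}

int encode( int blocks, int fmt )
{
    /* One dispatch on the format picks the pre-specialized body. */
    if( fmt == FMT_444 )
        return encode_internal( blocks, 3, FMT_444 );
    else if( fmt == FMT_422 )
        return encode_internal( blocks, 1, FMT_422 );
    else
        return encode_internal( blocks, 1, FMT_420 );
}

The runtime cost is a single predictable branch per call; and where the chroma format is pinned at build time, the dispatch condition is itself a constant, so the unused instantiations can be discarded entirely.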