/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"
/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG
#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;
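
/* Note on the dequant scale above: H.264's quantiser step doubles every 6 QP
 * steps, so the table only stores multipliers for i_qp%6 and the remainder is
 * a left shift by i_qp/6. Worked example (with the flat default matrix):
 * i_qp=26 reads dequant_mf[2][0] and shifts it left by 4. */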
static inline void idct_dequant_2x2_dc( int16_t dct[4], int16_t dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> 5;
    dct4x4[1][0] = (d0 - d1) * dmf >> 5;
    dct4x4[2][0] = (d2 + d3) * dmf >> 5;
    dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}
static inline void idct_dequant_2x2_dconly( int16_t out[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> 5;
    out[1] = (d0 - d1) * dmf >> 5;
    out[2] = (d2 + d3) * dmf >> 5;
    out[3] = (d2 - d3) * dmf >> 5;
}
static inline void dct2x2dc( int16_t d[4], int16_t dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

static inline void dct2x2dc_dconly( int16_t d[4] )
{
    int d0 = d[0] + d[1];
    int d1 = d[2] + d[3];
    int d2 = d[0] - d[1];
    int d3 = d[2] - d[3];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
}
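
/* The four 2x2 helpers above share the same butterfly: the 2x2 Hadamard matrix
 * H = [1 1; 1 -1] satisfies H*H = 2*I, so the forward and inverse chroma DC
 * transforms differ only by a scale factor, which is folded into the
 * quant/dequant constants rather than applied here. */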
static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[16], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}
static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}
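
/* Both quant wrappers above choose between trellis (RD-optimised) quantisation
 * and plain deadzone quantisation; the h->quantf.quant_* functions compute, per
 * coefficient, roughly level = (|coef|*mf + bias) >> 16 (a sketch of the scalar
 * quantiser, not the exact implementation). */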
/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients. NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */
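
/* For example (a sketch, not code used here), dropping the residual of 8x8
 * block i8 under this contract is just:
 *     h->mb.i_cbp_luma &= ~(1<<i8);
 *     STORE_8x8_NNZ( i8, 0 );     // see the macro below
 * with h->dct.luma4x4[] left untouched: the entropy coder never reads
 * coefficients whose CBP/NNZ claim they are zero. */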
void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}
#define STORE_8x8_NNZ(idx,nz)\
{\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] ) = nz * 0x0101;\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] ) = nz * 0x0101;\
}

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}
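
/* These stores rely on the x264_scan8 cache layout: the four NNZ flags of an
 * 8x8 block sit in two horizontally adjacent byte pairs, and each row of four
 * luma blocks is 32-bit addressable, which is what makes the multi-byte
 * M16/M32 writes legal. The nz * 0x0101 multiply splats a 0/1 flag into both
 * bytes of a pair. */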
void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ(idx,nz);
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ(idx,1);
    }
    else
        STORE_8x8_NNZ(idx,0);
}
static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t *p_src = h->mb.pic.p_fenc[0];
    uint8_t *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( int16_t, dct_dc4x4,[16] );

    int i, nz;
    int decimate_score = h->mb.b_dct_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}
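
/* Helper for the chroma DC rounding optimization below: returns nonzero iff
 * any of the four dequantised DC values of `dct` (+32, >>6) would round to a
 * different output than `ref` (which the caller has already biased by +32).
 * The XOR works because two values fall on the same multiple of 64 iff they
 * agree in every bit above the low six, so any difference survives the >>6. */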
static inline int idct_dequant_round_2x2_dc( int16_t ref[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    int16_t out[4];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0] ^ (out[0]+32))
          | (ref[1] ^ (out[1]+32))
          | (ref[2] ^ (out[2]+32))
          | (ref[3] ^ (out[3]+32))) >> 6;
}
/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, int16_t dct2x2[4] )
{
    int16_t dct2x2_orig[4];
    int coeff;
    int nz = 0;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0] += 32;
    dct2x2_orig[1] += 32;
    dct2x2_orig[2] += 32;
    dct2x2_orig[3] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0]|dct2x2_orig[1]|dct2x2_orig[2]|dct2x2_orig[3])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
    for( coeff = 3; coeff >= 0; coeff-- )
    {
        int sign = dct2x2[coeff] < 0 ? -1 : 1;
        int level = dct2x2[coeff];

        if( !level )
            continue;

        while( level )
        {
            dct2x2[coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
                break;
            level -= sign;
        }

        nz |= level;
        dct2x2[coeff] = level;
    }

    return !!nz;
}
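
/* Chroma encoding proper. An early variance check can prove the residual is
 * not worth coding and fall back to DC-only coding (or nothing at all);
 * otherwise each channel goes through the full 2x2 DC + 4x4 AC
 * quant/scan/dequant path, with decimation as a final fallback. */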
void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch, nz, nz_dc;
    int b_decimate = b_inter && h->mb.b_dct_decimate;
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
        score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[25]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[26]] = 0;
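            /* The ten entries cleared above are the eight chroma AC blocks
             * (scan8[16..23]) and the two chroma DC blocks (scan8[25],
             * scan8[26]) in the NNZ cache. */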
            for( ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    dct2x2dc_dconly( dct2x2 );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}
static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}
/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0], FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );

        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}
/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as that of the source frame. This means
 * they will only work correctly if the neighboring blocks are losslessly coded. In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */
/* This can be resolved by explicitly copying the edge pixels after doing the mc.copy, but this doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */
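/* Background (to the best of our reading of the spec): in lossless (transform
 * bypass) mode, H.264 replaces horizontal and vertical intra prediction with
 * sample-wise DPCM, where every pixel is predicted from its immediate left or
 * top neighbour. Since lossless reconstruction is exact, that prediction
 * equals the source plane shifted by one pixel or row, which is why a plain
 * mc.copy from p_fenc_plane suffices in the V/H cases below. */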
void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}
void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}
void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}
void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}
/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int i, idx, nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( uint8_t, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                M32( &p_dst[4-FDEC_STRIDE] ) = p_dst[3-FDEC_STRIDE] * 0x01010101U;
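            /* The splat above replicates the last available top pixel into all
             * four top-right positions, matching H.264's rule that unavailable
             * top-right samples are inferred from the rightmost top sample. */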
            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                             h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                             h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    STORE_8x8_NNZ(i8x8,nz);
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                             h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                             h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( idx = 0; idx < 4; idx++ )
                {
                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ(idx,1);
                    }
                    else
                        STORE_8x8_NNZ(idx,0);
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                        STORE_8x8_NNZ(i8x8,0)
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mv give same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}
/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    ALIGNED_ARRAY_16( int16_t, dctscan,[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh, ssd;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0], FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );

            if( h->sh.weight[0][1+ch].weightfn )
                h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      &h->sh.weight[0][1+ch], 8 );
        }

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2dc( dct2x2, dct4x4 );
        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}
/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/
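
/* The update below computes, per coefficient position (a paraphrase of the
 * arithmetic, not extra behaviour):
 *     nr_offset[i] = strength*count / (residual_sum[i]*weight[i]/256 + 1)
 * where strength is h->param.analyse.i_noise_reduction and count/residual_sum
 * are running statistics. Positions whose coefficients are usually near zero
 * get a larger deadzone offset, i.e. they are rounded toward zero harder on
 * the assumption that they are mostly noise. */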
void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}
/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * doesn't transform chroma dc.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->mb.b_dct_decimate;
    int nnz8x8 = 0;
    int ch, nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ(i8,nnz8x8);
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                         h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                         h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            int16_t dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ(i8,1);
                }
                else
                    STORE_8x8_NNZ(i8,0);
            }
            else
                STORE_8x8_NNZ(i8,0);
        }
        else
        {
            int i4;
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ(i8,0);
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}
/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    int nz;

    /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}