/*****************************************************************************
 * macroblock.c: macroblock encoding
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( dctcoef level[4], dctcoef dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG
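
/* Worked example (editorial illustration, not in the reference flow): for
 * dct = {a,b,c,d}, i.e. the 2x2 grid [a b; c d] in raster order, the scan
 * above produces level = {a,c,b,d} -- the column-major traversal implied by
 * the dct[x*2+y] indexing. */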

#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;

static inline void idct_dequant_2x2_dc( dctcoef dct[4], dctcoef dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> 5;
    dct4x4[1][0] = (d0 - d1) * dmf >> 5;
    dct4x4[2][0] = (d2 + d3) * dmf >> 5;
    dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}

static inline void idct_dequant_2x2_dconly( dctcoef out[4], dctcoef dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> 5;
    out[1] = (d0 - d1) * dmf >> 5;
    out[2] = (d2 + d3) * dmf >> 5;
    out[3] = (d2 - d3) * dmf >> 5;
}
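
/* Worked example of the shared IDCT_DEQUANT_START scaling (illustration):
 * at i_qp == 26, dmf = dequant_mf[26%6][0] << 26/6 = dequant_mf[2][0] << 4,
 * so each reconstructed DC is (butterfly output) * dmf >> 5. Only the DC
 * entry [0] of the 4x4 dequant table is needed because the 2x2 Hadamard has
 * no frequency-dependent scaling. */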

static inline void dct2x2dc( dctcoef d[4], dctcoef dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, dctcoef dct[16], int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, ctx_block_cat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, dctcoef dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}
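
/* Illustrative sketch (not part of the original file): absent trellis, the
 * wrappers above resolve to a scalar deadzone quantizer of the form
 * level = sign(c) * ((|c|*mf + bias) >> 16). Simplified from the C fallback
 * in common/quant.c; the exact types and names there differ. */
static inline int x264_quant_deadzone_sketch( dctcoef *dct, const uint32_t *mf, const uint32_t *bias, int size )
{
    int nz = 0;
    for( int i = 0; i < size; i++ )
    {
        if( dct[i] > 0 )
            dct[i] =  (int)(( bias[i] + dct[i]) * mf[i] >> 16);
        else
            dct[i] = -(int)(( bias[i] - dct[i]) * mf[i] >> 16);
        nz |= dct[i];
    }
    return !!nz;
}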

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */
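
/* For example (illustration), to drop an already-quantized luma 8x8 block i8
 * no memset of the coefficient array is needed; clearing the CBP bit and the
 * cached NNZ values is sufficient, as the entropy coder will then never look
 * at the stale coefficients:
 *
 *     h->mb.i_cbp_luma &= ~(1 << i8);
 *     STORE_8x8_NNZ( s8, 0 );
 */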

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    pixel *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    pixel *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

#define STORE_8x8_NNZ( s8, nz )\
do\
{\
    M16( &h->mb.cache.non_zero_count[(s8) + 0*8] ) = (nz) * 0x0101;\
    M16( &h->mb.cache.non_zero_count[(s8) + 1*8] ) = (nz) * 0x0101;\
} while(0)

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}
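
/* Note (editorial): the four M32 stores above rely on x264_scan8 placing each
 * row of four 4x4 luma blocks (0,1,4,5 / 2,3,6,7 / 8,9,12,13 / 10,11,14,15)
 * at consecutive bytes of the NNZ cache, so a single 32-bit write clears an
 * entire row. */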

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = idx&1;
    int y = idx>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    int nz;
    pixel *p_src = &h->mb.pic.p_fenc[0][8*x + 8*y*FENC_STRIDE];
    pixel *p_dst = &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( dctcoef, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ( s8, nz );
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ( s8, 1 );
    }
    else
        STORE_8x8_NNZ( s8, 0 );
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    pixel *p_src = h->mb.pic.p_fenc[0];
    pixel *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( dctcoef, dct_dc4x4,[16] );

    int nz;
    int decimate_score = h->mb.b_dct_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( int i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( int i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( int i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}
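
/* Decimation example for the i16x16 path above (illustration): the decimate
 * score charges each isolated +/-1 coefficient a small run-dependent cost
 * (roughly 1-3) and any |level| > 1 a prohibitive one, so a macroblock whose
 * AC residual is only a couple of stray +/-1s stays below the threshold of 6
 * and is signalled as CBP 0 instead of being coded. */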

static inline int idct_dequant_round_2x2_dc( dctcoef ref[4], dctcoef dct[4], int dequant_mf[6][16], int i_qp )
{
    dctcoef out[4];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0] ^ (out[0]+32))
          | (ref[1] ^ (out[1]+32))
          | (ref[2] ^ (out[2]+32))
          | (ref[3] ^ (out[3]+32))) >> 6;
}
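
/* How the comparison above works (editorial note): the caller passes ref[] as
 * the previously dequantized DCs with +32 pre-added, so (ref[i] ^ (out[i]+32))
 * >> 6 is nonzero iff the two values differ in bit 6 or above, i.e. iff the
 * candidate coefficients no longer round to the same reconstruction at the
 * final >> 6. */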

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, dctcoef dct2x2[4] )
{
    dctcoef dct2x2_orig[4];
    int coeff, nz;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0] += 32;
    dct2x2_orig[1] += 32;
    dct2x2_orig[2] += 32;
    dct2x2_orig[3] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0]|dct2x2_orig[1]|dct2x2_orig[2]|dct2x2_orig[3])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
    for( nz = 0, coeff = h->quantf.coeff_last[DCT_CHROMA_DC]( dct2x2 ); coeff >= 0; coeff-- )
    {
        int level = dct2x2[coeff];
        int sign = level>>31 | 1;  /* dct2x2[coeff] < 0 ? -1 : 1 */

        while( level )
        {
            dct2x2[coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
            {
                nz = 1;
                dct2x2[coeff] = level;
                break;
            }
            level -= sign;
        }
    }

    return nz;
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int nz, nz_dc;
    int b_decimate = b_inter && h->mb.b_dct_decimate;
    ALIGNED_ARRAY_16( dctcoef, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
        if( score < thresh*4 )
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            M16( &h->mb.cache.non_zero_count[x264_scan8[25]] ) = 0;

            for( int ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        pixel *p_src = h->mb.pic.p_fenc[1+ch];
        pixel *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( int i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( int i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    /* 0 = none, 1 = DC only, 2 = DC+AC */
    h->mb.i_cbp_chroma = ((!!M16( &h->mb.cache.non_zero_count[x264_scan8[25]] )) | h->mb.i_cbp_chroma) + h->mb.i_cbp_chroma;
}
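
/* Worked example of the final CBP line above (illustration): with
 * dc = !!M16(...) (any chroma DC nonzero) and ac = h->mb.i_cbp_chroma
 * (1 iff some AC survived), ((dc|ac) + ac) yields 2 when ac=1 (DC+AC),
 * 1 when ac=0 but dc=1 (DC only), and 0 when both are zero -- exactly the
 * three chroma CBP states. */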

static void x264_macroblock_encode_skip( x264_t *h )
{
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+0*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+1*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+2*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+3*8] ) = 0;
    for( int i = 16; i < 24; i++ )
        h->mb.cache.non_zero_count[x264_scan8[i]] = 0;
    h->mb.i_cbp_luma = 0;
    h->mb.i_cbp_chroma = 0;
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                              h->mb.mv_min[0], h->mb.mv_max[0] );
        int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                              h->mb.mv_min[1], h->mb.mv_max[1] );

        h->mc.mc_luma( h->mb.pic.p_fdec[0], FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( mvx|mvy )
            h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvx, mvy, 8, 8 );
        else
            h->mc.load_deinterleave_8x8x2_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1] );

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );
        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-FENC_STRIDE, FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-FENC_STRIDE, FENC_STRIDE, 8 );
        memcpy( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[1]-FDEC_STRIDE, 8*sizeof(pixel) );
        memcpy( h->mb.pic.p_fdec[2], h->mb.pic.p_fdec[2]-FDEC_STRIDE, 8*sizeof(pixel) );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-1, FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-1, FENC_STRIDE, 8 );
        x264_copy_column8( h->mb.pic.p_fdec[1]+4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+4*FDEC_STRIDE-1 );
        x264_copy_column8( h->mb.pic.p_fdec[2]+4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+4*FDEC_STRIDE-1 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, pixel *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    pixel *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, pixel *p_dst, int idx, int i_mode, pixel edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    pixel *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}
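
/* Rationale for the four lossless predictors above (editorial note): in
 * lossless mode the reconstruction equals the source, so the pure copy modes
 * (vertical and horizontal) can be served directly from the source pixels
 * with h->mc.copy, which is cheaper than the generic predictors; every other
 * mode falls through to the normal prediction functions. */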

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->mb.i_type == I_PCM )
    {
        /* if PCM is chosen, we need to store reconstructed frame data */
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE, 16 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, 8 );
        return;
    }

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( pixel, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            pixel *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            pixel *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                MPIXEL_X4( &p_dst[4-FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst[3-FDEC_STRIDE] );

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = i8x8&1;
                    int y = i8x8>>1;
                    int s8 = X264_SCAN8_0 + 2*x + 16*y;

                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8], h->mb.pic.p_fenc[0] + 8*x + 8*y*FENC_STRIDE,
                                                                   h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE );
                    STORE_8x8_NNZ( s8, nz );
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( int i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                             h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                             h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( dctcoef, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis || !h->param.b_cabac; // 8x8 trellis is inherently optimal decimation for CABAC
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( int idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( int idx = 0; idx < 4; idx++ )
                {
                    int x = idx&1;
                    int y = idx>>1;
                    int s8 = X264_SCAN8_0 + 2*x + 16*y;

                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ( s8, 1 );
                    }
                    else
                        STORE_8x8_NNZ( s8, 0 );
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( int i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( int i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    int idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                int x = i8x8&1;
                int y = i8x8>>1;

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                    {
                        int s8 = X264_SCAN8_0 + 2*x + 16*y;
                        STORE_8x8_NNZ( s8, 0 );
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* store cbp */
    int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma;
    if( h->param.b_cabac )
        cbp |= h->mb.cache.non_zero_count[x264_scan8[24]] << 8
            |  h->mb.cache.non_zero_count[x264_scan8[25]] << 9
            |  h->mb.cache.non_zero_count[x264_scan8[26]] << 10;
    h->mb.cbp[h->mb.i_mb_xy] = cbp;
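
    /* cbp layout example (illustration): with all four luma 8x8 blocks coded
     * and DC-only chroma, cbp = 1<<4 | 0xf = 0x1f; under CABAC, bits 8-10
     * additionally record the NNZ flags of the i16x16 luma DC and the two
     * chroma DC blocks (scan8[24], scan8[25], scan8[26]). */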

    /* Check for P_SKIP
     * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mv give same result)*/
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( dctcoef, dct2x2,[4] );
    ALIGNED_ARRAY_16( dctcoef, dctscan,[16] );
    ALIGNED_4( int16_t mvp[2] );

    int i_qp = h->mb.i_qp;
    int thresh, ssd;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0], FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

    for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( int i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    if( !b_bidir )
    {
        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( M32( mvp ) )
            h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvp[0], mvp[1], 8, 8 );
        else
            h->mc.load_deinterleave_8x8x2_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1] );
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        pixel *p_src = h->mb.pic.p_fenc[1+ch];
        pixel *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir && h->sh.weight[0][1+ch].weightfn )
            h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                  h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                  &h->sh.weight[0][1+ch], 8 );

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        /* The vast majority of chroma checks will terminate during the DC check or the higher
         * threshold check, so we can save time by doing a DC-only DCT. */
        h->dctf.sub8x8_dct_dc( dct2x2, p_src, p_dst );

        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct coeffs */
        for( int i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            dct4x4[i4x4][0] = 0;
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

void x264_noise_reduction_update( x264_t *h )
{
    for( int cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( int i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( int i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
                / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}
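
/* Illustrative sketch (simplified from the C fallback in common/quant.c;
 * exact types there differ): the consumer of these offsets,
 * h->quantf.denoise_dct, accumulates |coef| into nr_residual_sum and shrinks
 * each coefficient toward zero by its offset before quantization: */
static inline void x264_denoise_dct_sketch( dctcoef *dct, uint32_t *sum, const uint16_t *offset, int size )
{
    for( int i = 0; i < size; i++ )
    {
        int level = dct[i];
        int sign = level>>31;
        level = (level+sign)^sign;  /* |level| */
        sum[i] += level;
        level -= offset[i];         /* adaptive deadzone */
        dct[i] = level<0 ? 0 : (level^sign)-sign;
    }
}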

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * doesn't transform chroma dc.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    int x = i8&1;
    int y = i8>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    pixel *p_fenc = h->mb.pic.p_fenc[0] + 8*x + 8*y*FENC_STRIDE;
    pixel *p_fdec = h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE;
    int b_decimate = h->mb.b_dct_decimate;
    int nnz8x8 = 0;
    int nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ( s8, nnz8x8 );
        }
        else
        {
            for( int i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                         h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                         h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( int ch = 0; ch < 2; ch++ )
        {
            dctcoef dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + 4*y*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + 4*y*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( dctcoef, dct8x8,[64] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ( s8, 1 );
                }
                else
                    STORE_8x8_NNZ( s8, 0 );
            }
            else
                STORE_8x8_NNZ( s8, 0 );
        }
        else
        {
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( int i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ( s8, 0 );
        }

        i_qp = h->mb.i_chroma_qp;

        for( int ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + 4*y*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + 4*y*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}
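
/* Note on the tail of the function above (editorial): this RD path never
 * transforms the chroma DC (the DC term is zeroed before quantization), so
 * i_cbp_chroma is conservatively forced to 0x02 (DC+AC) rather than derived
 * from the NNZ cache. */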

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    pixel *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    pixel *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    int nz;

    /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}