/*****************************************************************************
 * macroblock.c: macroblock encoding
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

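/* Reorder the four 2x2 chroma DC coefficients from their transform layout
 * into coding (scan) order for the entropy coder. */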
#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( dctcoef level[4], dctcoef dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG

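/* Butterfly shared by the 2x2 inverse Hadamard transforms below; dmf is the
 * DC dequant scale, dequant_mf[qp%6][0] << (qp/6), following the standard
 * H.264 dequant rule. */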
#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;

static inline void idct_dequant_2x2_dc( dctcoef dct[4], dctcoef dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> 5;
    dct4x4[1][0] = (d0 - d1) * dmf >> 5;
    dct4x4[2][0] = (d2 + d3) * dmf >> 5;
    dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}

static inline void idct_dequant_2x2_dconly( dctcoef out[4], dctcoef dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> 5;
    out[1] = (d0 - d1) * dmf >> 5;
    out[2] = (d2 + d3) * dmf >> 5;
    out[3] = (d2 - d3) * dmf >> 5;
}

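/* Forward 2x2 Hadamard of the four 4x4 DC coefficients of an 8x8 chroma
 * block. The DCs are then zeroed in the source blocks, since they are
 * coded separately from the ACs. */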
static inline void dct2x2dc( dctcoef d[4], dctcoef dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

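/* Quantize one block: RD-optimal trellis quantization if enabled, otherwise
 * simple deadzone quantization. Returns nonzero if any coefficient survives. */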
static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, dctcoef dct[16], int i_qp, int ctx_block_cat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, ctx_block_cat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, dctcoef dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */

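/* Encode one intra 4x4 luma block: transform the prediction residual,
 * quantize, and if anything survives, scan the coefficients for the entropy
 * coder and reconstruct the decoded pixels via dequant + IDCT. */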
void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    pixel *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    pixel *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

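/* Set all four NNZ cache entries of an 8x8 block to nz: each M16 store
 * broadcasts nz into two bytes of one cache row. */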
#define STORE_8x8_NNZ( s8, nz )\
do\
{\
    M16( &h->mb.cache.non_zero_count[(s8) + 0*8] ) = (nz) * 0x0101;\
    M16( &h->mb.cache.non_zero_count[(s8) + 1*8] ) = (nz) * 0x0101;\
} while(0)

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = idx&1;
    int y = idx>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    int nz;
    pixel *p_src = &h->mb.pic.p_fenc[0][8*x + 8*y*FENC_STRIDE];
    pixel *p_dst = &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( dctcoef, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ( s8, nz );
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ( s8, 1 );
    }
    else
        STORE_8x8_NNZ( s8, 0 );
}

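/* In i16x16 mode the 16 4x4 DC coefficients receive an extra 4x4 Hadamard
 * transform and are coded as a separate DC block (cached at scan8[24]);
 * only the AC coefficients remain in the individual 4x4 blocks. */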
static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    pixel *p_src = h->mb.pic.p_fenc[0];
    pixel *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( dctcoef, dct_dc4x4,[16] );

    int nz;
    int decimate_score = h->mb.b_dct_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( int i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( int i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inverted */
        if( h->mb.i_cbp_luma )
            for( int i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}

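/* Returns nonzero iff the dequantized DCs would reconstruct differently from
 * ref after the final rounding: (a ^ b) >> 6 is nonzero exactly when a and b
 * differ in bit 6 or above, i.e. when the (x+32)>>6 rounding used by the
 * DC-only IDCT would yield a different result. */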
static inline int idct_dequant_round_2x2_dc( dctcoef ref[4], dctcoef dct[4], int dequant_mf[6][16], int i_qp )
{
    dctcoef out[4];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0] ^ (out[0]+32))
          | (ref[1] ^ (out[1]+32))
          | (ref[2] ^ (out[2]+32))
          | (ref[3] ^ (out[3]+32))) >> 6;
}

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, dctcoef dct2x2[4] )
{
    dctcoef dct2x2_orig[4];
    int coeff, nz;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0] += 32;
    dct2x2_orig[1] += 32;
    dct2x2_orig[2] += 32;
    dct2x2_orig[3] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0]|dct2x2_orig[1]|dct2x2_orig[2]|dct2x2_orig[3])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
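    /* Walk each coefficient's magnitude toward zero, keeping the smallest
     * value that still reconstructs identically after rounding; a coefficient
     * that reaches zero is dropped losslessly. */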
    for( nz = 0, coeff = h->quantf.coeff_last[DCT_CHROMA_DC]( dct2x2 ); coeff >= 0; coeff-- )
    {
        int level = dct2x2[coeff];
        int sign = level>>31 | 1; /* dct2x2[coeff] < 0 ? -1 : 1 */

        while( level )
        {
            dct2x2[coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
            {
                nz = 1;
                dct2x2[coeff] = level;
                break;
            }
            level -= sign;
        }
    }

    return nz;
}

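/* Encode both 8x8 chroma blocks: AC coefficients per 4x4 block plus one
 * 2x2 Hadamard-transformed DC block per channel, with early termination
 * and decimation available for inter blocks. */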
void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int nz, nz_dc;
    int b_decimate = b_inter && h->mb.b_dct_decimate;
    ALIGNED_ARRAY_16( dctcoef, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
        if( score < thresh*4 )
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            M16( &h->mb.cache.non_zero_count[x264_scan8[25]] ) = 0;

            for( int ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        pixel *p_src = h->mb.pic.p_fenc[1+ch];
        pixel *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( int i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( int i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    /* 0 = none, 1 = DC only, 2 = DC+AC */
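    /* At this point i_cbp_chroma is 1 iff any AC coefficient survived; with
     * dc = any chroma DC NNZ, (dc|ac) + ac evaluates to: ac ? 2 : dc ? 1 : 0. */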
    h->mb.i_cbp_chroma = ((!!M16( &h->mb.cache.non_zero_count[x264_scan8[25]] )) | h->mb.i_cbp_chroma) + h->mb.i_cbp_chroma;
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+0*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+1*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+2*8] ) = 0;
    M32( &h->mb.cache.non_zero_count[x264_scan8[0]+3*8] ) = 0;
    for( int i = 16; i < 24; i++ )
        h->mb.cache.non_zero_count[x264_scan8[i]] = 0;
    h->mb.i_cbp_luma = 0;
    h->mb.i_cbp_chroma = 0;
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                              h->mb.mv_min[0], h->mb.mv_max[0] );
        int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                              h->mb.mv_min[1], h->mb.mv_max[1] );

        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( mvx | mvy )
            h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvx, mvy, 8, 8 );
        else
            h->mc.load_deinterleave_8x8x2_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1] );

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );
        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-FENC_STRIDE, FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-FENC_STRIDE, FENC_STRIDE, 8 );
        memcpy( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[1]-FDEC_STRIDE, 8*sizeof(pixel) );
        memcpy( h->mb.pic.p_fdec[2], h->mb.pic.p_fdec[2]-FDEC_STRIDE, 8*sizeof(pixel) );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1]-1, FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2]-1, FENC_STRIDE, 8 );
        x264_copy_column8( h->mb.pic.p_fdec[1]+4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+4*FDEC_STRIDE-1 );
        x264_copy_column8( h->mb.pic.p_fdec[2]+4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+4*FDEC_STRIDE-1 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, pixel *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    pixel *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, pixel *p_dst, int idx, int i_mode, pixel edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    pixel *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->mb.i_type == I_PCM )
    {
        /* if PCM is chosen, we need to store reconstructed frame data */
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE, 16 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, 8 );
        return;
    }

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( pixel, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            pixel *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            pixel *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                MPIXEL_X4( &p_dst[4-FDEC_STRIDE] ) = PIXEL_SPLAT_X4( p_dst[3-FDEC_STRIDE] );

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = i8x8&1;
                    int y = i8x8>>1;
                    int s8 = X264_SCAN8_0 + 2*x + 16*y;

                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8], h->mb.pic.p_fenc[0] + 8*x + 8*y*FENC_STRIDE,
                                                                   h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE );
                    STORE_8x8_NNZ( s8, nz );
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( int i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( dctcoef, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis || !h->param.b_cabac; // 8x8 trellis is inherently optimal decimation for CABAC
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( int idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( int idx = 0; idx < 4; idx++ )
                {
                    int x = idx&1;
                    int y = idx>>1;
                    int s8 = X264_SCAN8_0 + 2*x + 16*y;

                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ( s8, 1 );
                    }
                    else
                        STORE_8x8_NNZ( s8, 0 );
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( int i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( int i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    int idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                int x = i8x8&1;
                int y = i8x8>>1;

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                    {
                        int s8 = X264_SCAN8_0 + 2*x + 16*y;
                        STORE_8x8_NNZ( s8, 0 );
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* store cbp */
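    /* Bits 0-3: per-8x8 luma CBFs; bits 4-5: chroma CBP (0-2). For CABAC,
     * bits 8-10 additionally cache the luma DC and two chroma DC NNZ flags. */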
    int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma;
    if( h->param.b_cabac )
        cbp |= h->mb.cache.non_zero_count[x264_scan8[24]] << 8
            |  h->mb.cache.non_zero_count[x264_scan8[25]] << 9
            |  h->mb.cache.non_zero_count[x264_scan8[26]] << 10;
    h->mb.cbp[h->mb.i_mb_xy] = cbp;

    /* Check for P_SKIP
     * XXX: in the ME perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mvs give the same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( dctcoef, dct2x2,[4] );
    ALIGNED_ARRAY_16( dctcoef, dctscan,[16] );
    ALIGNED_4( int16_t mvp[2] );

    int i_qp = h->mb.i_qp;
    int thresh, ssd;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

    for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( int i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    if( !b_bidir )
    {
        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( M32( mvp ) )
            h->mc.mc_chroma( h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2], FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvp[0], mvp[1], 8, 8 );
        else
            h->mc.load_deinterleave_8x8x2_fdec( h->mb.pic.p_fdec[1], h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1] );
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        pixel *p_src = h->mb.pic.p_fenc[1+ch];
        pixel *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir && h->sh.weight[0][1+ch].weightfn )
            h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                  h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                  &h->sh.weight[0][1+ch], 8 );

        /* There is almost never a termination during chroma, but we can't avoid the check
         * entirely, so instead we check the SSD and skip the actual check if the score is
         * low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        /* The vast majority of chroma checks will terminate during the DC check or the higher
         * threshold check, so we can save time by doing a DC-only DCT. */
        h->dctf.sub8x8_dct_dc( dct2x2, p_src, p_dst );

        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct coeffs */
        for( int i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            dct4x4[i4x4][0] = 0;
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

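/* Per coefficient, the offset is approximately
 * strength * count / (residual_sum * weight / 256): coefficients whose
 * accumulated magnitude is small relative to the noise-reduction strength
 * receive a larger subtractive deadzone. */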
void x264_noise_reduction_update( x264_t *h )
{
    for( int cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( int i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( int i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * Doesn't transform chroma DC.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    int x = i8&1;
    int y = i8>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    pixel *p_fenc = h->mb.pic.p_fenc[0] + 8*x + 8*y*FENC_STRIDE;
    pixel *p_fdec = h->mb.pic.p_fdec[0] + 8*x + 8*y*FDEC_STRIDE;
    int b_decimate = h->mb.b_dct_decimate;
    int nnz8x8 = 0;
    int nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ( s8, nnz8x8 );
        }
        else
        {
            for( int i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( int ch = 0; ch < 2; ch++ )
        {
            dctcoef dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + 4*y*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + 4*y*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( dctcoef, dct8x8,[64] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ( s8, 1 );
                }
                else
                    STORE_8x8_NNZ( s8, 0 );
            }
            else
                STORE_8x8_NNZ( s8, 0 );
        }
        else
        {
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[4],[16] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( int i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ( s8, 0 );
        }

        i_qp = h->mb.i_chroma_qp;

        for( int ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + 4*x + 4*y*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + 4*x + 4*y*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
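    /* Chroma DC isn't transformed here (see the function header), so assume
     * the pessimistic case for RD purposes: chroma CBP = 2 (DC+AC). */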
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    pixel *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    pixel *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    int nz;

    /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( dctcoef, dct4x4,[16] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}