/*****************************************************************************
 * macroblock.c: H.264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG
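/* Note: the 2x2 chroma DC block is stored column-major (dct[x*2+y]), so the
 * scan above reorders it to the raster order used for coding:
 * DC, horizontal, vertical, diagonal. */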

#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;

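/* IDCT_DEQUANT_START is the first butterfly stage of the 2x2 inverse Hadamard
 * transform plus the chroma DC dequant scale (dmf).  The functions below
 * finish the second butterfly stage, then dequantize: multiply by dmf and
 * normalize with a final >>5. */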
static inline void idct_dequant_2x2_dc( int16_t dct[4], int16_t dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> 5;
    dct4x4[1][0] = (d0 - d1) * dmf >> 5;
    dct4x4[2][0] = (d2 + d3) * dmf >> 5;
    dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}

static inline void idct_dequant_2x2_dconly( int16_t out[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> 5;
    out[1] = (d0 - d1) * dmf >> 5;
    out[2] = (d2 + d3) * dmf >> 5;
    out[3] = (d2 - d3) * dmf >> 5;
}

static inline void dct2x2dc( int16_t d[4], int16_t dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[16], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}
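
/* These helpers select between trellis quantization (RD-optimal level
 * decisions, used when h->mb.b_trellis is set) and the plain deadzone
 * quantizer; both return nonzero iff any coefficient survives, which callers
 * store directly as the NNZ flag. */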

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */
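/* For example: if quantization leaves a single trailing +/-1 in an inter 4x4
 * block, its decimate score stays below the threshold, so the encoder can
 * drop the block simply by leaving its CBP bit and cached NNZ at zero; the
 * coefficients themselves never need to be cleared because the entropy coder
 * won't read them. */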

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

#define STORE_8x8_NNZ(idx,nz)\
{\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] ) = nz * 0x0101;\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] ) = nz * 0x0101;\
}

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}
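/* Both macros rely on the scan8 layout of the NNZ cache: the four 4x4 blocks
 * of an 8x8 block sit as two horizontally adjacent byte pairs on consecutive
 * cache rows (hence two 16-bit stores), and each row of four luma NNZ values
 * is contiguous (hence four 32-bit stores clear the whole 16x16). */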

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ(idx,nz);
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ(idx,1);
    }
    else
        STORE_8x8_NNZ(idx,0);
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( int16_t, dct_dc4x4,[16] );

    int nz;
    int decimate_score = h->mb.b_dct_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( int i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( int i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inverted */
        if( h->mb.i_cbp_luma )
            for( int i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}

static inline int idct_dequant_round_2x2_dc( int16_t ref[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    int16_t out[4];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0] ^ (out[0]+32))
          | (ref[1] ^ (out[1]+32))
          | (ref[2] ^ (out[2]+32))
          | (ref[3] ^ (out[3]+32))) >> 6;
}
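/* Returns nonzero iff the reconstruction differs from ref: ref[] already has
 * the rounding constant added, so the XORs with out[]+32 expose any bits that
 * differ at or above the final >>6 rounding point. */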

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, int16_t dct2x2[4] )
{
    int16_t dct2x2_orig[4];
    int coeff, nz;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0] += 32;
    dct2x2_orig[1] += 32;
    dct2x2_orig[2] += 32;
    dct2x2_orig[3] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0]|dct2x2_orig[1]|dct2x2_orig[2]|dct2x2_orig[3])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
    for( nz = 0, coeff = h->quantf.coeff_last[DCT_CHROMA_DC]( dct2x2 ); coeff >= 0; coeff-- )
    {
        int level = dct2x2[coeff];
        int sign = level>>31 | 1; /* dct2x2[coeff] < 0 ? -1 : 1 */

        while( level )
        {
            dct2x2[coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
            {
                nz = 1;
                dct2x2[coeff] = level;
                break;
            }
            level -= sign;
        }
    }

    return nz;
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int nz, nz_dc;
    int b_decimate = b_inter && h->mb.b_dct_decimate;
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score  = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            M16( &h->mb.cache.non_zero_count[x264_scan8[25]] ) = 0;

            for( int ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( int ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( int i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( int i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    /* 0 = none, 1 = DC only, 2 = DC+AC */
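    /* If any AC block survived, i_cbp_chroma is already 1, so the expression
     * below yields (x|1)+1 = 2; otherwise it reduces to the chroma-DC NNZ flag
     * (the M16 load reads both planes' DC NNZ entries at once). */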
    h->mb.i_cbp_chroma = ((!!M16( &h->mb.cache.non_zero_count[x264_scan8[25]] )) | h->mb.i_cbp_chroma) + h->mb.i_cbp_chroma;
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    for( int i = 0; i < sizeof( h->mb.cache.non_zero_count ); i += 16 )
        M128( &h->mb.cache.non_zero_count[i] ) = M128_ZERO;
    h->mb.i_cbp_luma = 0;
    h->mb.i_cbp_chroma = 0;
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                              h->mb.mv_min[0], h->mb.mv_max[0] );
        int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                              h->mb.mv_min[1], h->mb.mv_max[1] );

        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        /* Special case for mv0, which is (of course) very common in P-skip mode. */
        if( mvx | mvy )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                             mvx, mvy, 8, 8 );
            h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                             mvx, mvy, 8, 8 );
        }
        else
        {
            h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1], 8 );
            h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2], 8 );
        }

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );

        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as those of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */
/* This could be resolved by explicitly copying the edge pixels after doing the mc.copy, but that doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */
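/* (For example, vertical prediction below becomes a plain copy of the row of
 * source pixels directly above the block; this matches what the decoder
 * predicts only because lossless reconstruction is bit-exact with the source.) */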

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->mb.i_type == I_PCM )
    {
        /* if PCM is chosen, we need to store reconstructed frame data */
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc[0], FENC_STRIDE, 16 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc[1], FENC_STRIDE, 8 );
        h->mc.copy[PIXEL_8x8]  ( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc[2], FENC_STRIDE, 8 );
        return;
    }

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( uint8_t, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( int i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                M32( &p_dst[4-FDEC_STRIDE] ) = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    STORE_8x8_NNZ(i8x8,nz);
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( int i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( int idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( int idx = 0; idx < 4; idx++ )
                {
                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ(idx,1);
                    }
                    else
                        STORE_8x8_NNZ(idx,0);
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( int i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( int i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    int idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                        STORE_8x8_NNZ(i8x8,0)
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( int i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* store cbp */
    int cbp = h->mb.i_cbp_chroma << 4 | h->mb.i_cbp_luma;
    if( h->param.b_cabac )
        cbp |= h->mb.cache.non_zero_count[x264_scan8[24]] << 8
            |  h->mb.cache.non_zero_count[x264_scan8[25]] << 9
            |  h->mb.cache.non_zero_count[x264_scan8[26]] << 10;
    h->mb.cbp[h->mb.i_mb_xy] = cbp;

    /* Check for P_SKIP
     * XXX: in the ME, perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple MVs give the same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    ALIGNED_ARRAY_16( int16_t, dctscan,[16] );
    ALIGNED_4( int16_t mvp[2] );

    int i_qp = h->mb.i_qp;
    int thresh, ssd;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

    for( int i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( int i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( int ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            /* Special case for mv0, which is (of course) very common in P-skip mode. */
            if( M32( mvp ) )
            {
                h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                                 h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                                 mvp[0], mvp[1], 8, 8 );
            }
            else
                h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE, h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch], 8 );

            if( h->sh.weight[0][1+ch].weightfn )
                h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      &h->sh.weight[0][1+ch], 8 );
        }

        /* There is almost never a termination during chroma, but we can't avoid the check
         * entirely, so instead we check the SSD and skip the full check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        /* The vast majority of chroma checks will terminate during the DC check or the higher
         * threshold check, so we can save time by doing a DC-only DCT. */
        h->dctf.sub8x8_dct_dc( dct2x2, p_src, p_dst );

        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct coeffs */
        for( int i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            /* We don't need to zero the DC coefficient before quantization because we already
             * checked that all the DCs were zero above at twice the precision that quant4x4
             * uses.  This applies even though the DC here is being quantized before the 2x2
             * transform. */
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/
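/* The per-coefficient offset (deadzone) computed below is roughly
 *   offset[i] = strength * count / (residual_sum[i] * weight[i] / 256)
 * i.e. inversely proportional to the coefficient's average weighted magnitude:
 * coefficients that are usually near zero (likely noise) get a large deadzone,
 * while consistently large ones are left mostly untouched. */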

void x264_noise_reduction_update( x264_t *h )
{
    for( int cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( int i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( int i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; 4 calls to this do not add up to one macroblock_encode.
 * Doesn't transform chroma DC.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->mb.b_dct_decimate;
    int nnz8x8 = 0;
    int nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ(i8,nnz8x8);
        }
        else
        {
            for( int i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( int ch = 0; ch < 2; ch++ )
        {
            int16_t dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ(i8,1);
                }
                else
                    STORE_8x8_NNZ(i8,0);
            }
            else
                STORE_8x8_NNZ(i8,0);
        }
        else
        {
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( int i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ(i8,0);
        }

        i_qp = h->mb.i_chroma_qp;

        for( int ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    int nz;

    /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}