/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG

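/* IDCT_DEQUANT_START computes the first butterfly stage of the 2x2 inverse
 * Hadamard transform and loads the DC dequant scale.  When i_qp/6 exceeds 5,
 * the scale is pre-shifted left so that the callers below can always finish
 * with a plain right shift by -qbits. */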
#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0]; \
    int qbits = i_qp/6 - 5; \
    if( qbits > 0 ) \
    { \
        dmf <<= qbits; \
        qbits = 0; \
    }

static inline void idct_dequant_2x2_dc( int16_t dct[4], int16_t dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> -qbits;
    dct4x4[1][0] = (d0 - d1) * dmf >> -qbits;
    dct4x4[2][0] = (d2 + d3) * dmf >> -qbits;
    dct4x4[3][0] = (d2 - d3) * dmf >> -qbits;
}

static inline void idct_dequant_2x2_dconly( int16_t out[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> -qbits;
    out[1] = (d0 - d1) * dmf >> -qbits;
    out[2] = (d2 + d3) * dmf >> -qbits;
    out[3] = (d2 - d3) * dmf >> -qbits;
}

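/* dct2x2dc gathers the DC coefficients of the four 4x4 chroma blocks, applies
 * the forward 2x2 Hadamard transform, and zeroes the DCs out of the source
 * blocks so they aren't coded twice.  With c0..c3 being the DCs of blocks
 * 0..3, the outputs are:
 *   d[0] = c0 + c1 + c2 + c3
 *   d[1] = c0 + c1 - c2 - c3
 *   d[2] = c0 - c1 + c2 - c3
 *   d[3] = c0 - c1 - c2 + c3
 * dct2x2dc_dconly applies the same transform in place to an already-gathered
 * DC vector. */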
static inline void dct2x2dc( int16_t d[4], int16_t dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

static inline void dct2x2dc_dconly( int16_t d[4] )
{
    int d0 = d[0] + d[1];
    int d1 = d[2] + d[3];
    int d2 = d[0] - d[1];
    int d3 = d[2] - d[3];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
}

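/* Quantization wrappers: dispatch to trellis RD quantization when enabled,
 * otherwise to the plain deadzone quantizer.  The return value is the
 * nonzero flag used for NNZ and CBP. */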
static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[16], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */
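/* For example, x264_mb_encode_i16x16 below decimates by zeroing i_cbp_luma
 * and clearing the sixteen luma NNZ flags, without touching the coefficients
 * already stored in h->dct.luma4x4. */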

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

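/* STORE_8x8_NNZ writes the four NNZ flags of an 8x8 block (two adjacent
 * scan8 pairs) with two 16-bit stores; CLEAR_16x16_NNZ zeroes all sixteen
 * luma NNZ flags with four 32-bit stores. */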
#define STORE_8x8_NNZ(idx,nz)\
{\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] ) = nz * 0x0101;\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] ) = nz * 0x0101;\
}

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ(idx,nz);
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ(idx,1);
    }
    else
        STORE_8x8_NNZ(idx,0);
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( int16_t, dct_dc4x4,[16] );

    int i, nz;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || (h->param.analyse.b_dct_decimate && h->sh.i_type == SLICE_TYPE_P);
    int decimate_score = b_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}

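/* Returns nonzero iff dequantizing dct would reconstruct any DC value that
 * rounds (>>6) differently from the reference; the OR of XORs compares all
 * four values in one branch-free expression. */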
static inline int idct_dequant_round_2x2_dc( int16_t ref[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    int16_t out[4];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0] ^ (out[0]+32))
          | (ref[1] ^ (out[1]+32))
          | (ref[2] ^ (out[2]+32))
          | (ref[3] ^ (out[3]+32))) >> 6;
}

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, int16_t dct2x2[4] )
{
    int16_t dct2x2_orig[4];
    int coeff;
    int nz = 0;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0] += 32;
    dct2x2_orig[1] += 32;
    dct2x2_orig[2] += 32;
    dct2x2_orig[3] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0]|dct2x2_orig[1]|dct2x2_orig[2]|dct2x2_orig[3])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
    for( coeff = 3; coeff >= 0; coeff-- )
    {
        int sign = dct2x2[coeff] < 0 ? -1 : 1;
        int level = dct2x2[coeff];

        if( !level )
            continue;

        while( level )
        {
            dct2x2[coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
                break;
            level -= sign;
        }

        nz |= level;
        dct2x2[coeff] = level;
    }

    return !!nz;
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch, nz, nz_dc;
    int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate);
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score  = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[25]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[26]] = 0;
            for( ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    dct2x2dc_dconly( dct2x2 );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );

        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as those of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */
/* This could be resolved by explicitly copying the edge pixels after doing the mc.copy, but that doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,idx,nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( uint8_t, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                M32( &p_dst[4-FDEC_STRIDE] ) = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    STORE_8x8_NNZ(i8x8,nz);
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( idx = 0; idx < 4; idx++ )
                {
                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ(idx,1);
                    }
                    else
                        STORE_8x8_NNZ(idx,0);
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                        STORE_8x8_NNZ(i8x8,0)
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

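    /* With CABAC, the three DC NNZ flags (luma DC at scan8[24], chroma DCs at
     * scan8[25] and scan8[26]) are packed into bits 8-10 of the stored cbp. */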
    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: in the ME, perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple MVs give the same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check whether the current MB could be encoded as a [PB]_SKIP
 *  (it assumes the previous QP is used).
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    ALIGNED_ARRAY_16( int16_t, dctscan,[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh, ssd;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

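    /* Quantize the luma residual and accumulate its decimate score; once the
     * score reaches 6 the residual is too significant to drop, so the MB
     * cannot be skipped. */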
    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );

            if( h->sh.weight[0][1+ch].weightfn )
                h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      &h->sh.weight[0][1+ch], 8 );
        }

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2dc( dct2x2, dct4x4 );
        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

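/* Each offset is roughly i_noise_reduction scaled by the running sample
 * count over the (weighted) running magnitude sum of that coefficient, so
 * coefficients that are usually small get a large deadzone and vice versa.
 * Halving the sums once the count exceeds its cap keeps the statistics a
 * decaying window over recent blocks. */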
void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode:
 * it doesn't transform the chroma DC.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int nnz8x8 = 0;
    int ch, nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ(i8,nnz8x8);
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            int16_t dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ(i8,1);
                }
                else
                    STORE_8x8_NNZ(i8,0);
            }
            else
                STORE_8x8_NNZ(i8,0);
        }
        else
        {
            int i4;
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ(i8,0);
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    int nz;

    /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}