/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x*2+y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[4] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG
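
/* Illustrative note (not used by the encoder): the 2x2 "zigzag" above is
 * really a raster scan.  dct[] is stored column-major (index x*2+y), so for
 * dct = {a,b,c,d} representing the block
 *     a c
 *     b d
 * the scan emits {a,c,b,d}, i.e. it just transposes back to raster order. */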

#define IDCT_DEQUANT_START \
    int d0 = dct[0] + dct[1]; \
    int d1 = dct[2] + dct[3]; \
    int d2 = dct[0] - dct[1]; \
    int d3 = dct[2] - dct[3]; \
    int dmf = dequant_mf[i_qp%6][0] << i_qp/6;
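
/* A worked example of the scale above (standard H.264 dequant behaviour):
 * the per-QP scale repeats every 6 steps and doubles each period, so for
 * i_qp = 23, dmf = dequant_mf[23%6][0] << (23/6) = dequant_mf[5][0] << 3.
 * The >> 5 in the functions below then folds together the inverse 2x2
 * Hadamard normalization and the dequant rounding shift. */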

static inline void idct_dequant_2x2_dc( int16_t dct[4], int16_t dct4x4[4][16], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0] = (d0 + d1) * dmf >> 5;
    dct4x4[1][0] = (d0 - d1) * dmf >> 5;
    dct4x4[2][0] = (d2 + d3) * dmf >> 5;
    dct4x4[3][0] = (d2 - d3) * dmf >> 5;
}

static inline void idct_dequant_2x2_dconly( int16_t out[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    IDCT_DEQUANT_START
    out[0] = (d0 + d1) * dmf >> 5;
    out[1] = (d0 - d1) * dmf >> 5;
    out[2] = (d2 + d3) * dmf >> 5;
    out[3] = (d2 - d3) * dmf >> 5;
}

static inline void dct2x2dc( int16_t d[4], int16_t dct4x4[4][16] )
{
    int d0 = dct4x4[0][0] + dct4x4[1][0];
    int d1 = dct4x4[2][0] + dct4x4[3][0];
    int d2 = dct4x4[0][0] - dct4x4[1][0];
    int d3 = dct4x4[2][0] - dct4x4[3][0];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
    dct4x4[0][0] = 0;
    dct4x4[1][0] = 0;
    dct4x4[2][0] = 0;
    dct4x4[3][0] = 0;
}

static inline void dct2x2dc_dconly( int16_t d[4] )
{
    int d0 = d[0] + d[1];
    int d1 = d[2] + d[3];
    int d2 = d[0] - d[1];
    int d3 = d[2] - d[3];
    d[0] = d0 + d1;
    d[2] = d2 + d3;
    d[1] = d0 - d1;
    d[3] = d2 - d3;
}
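
/* Side note (illustrative): the butterflies above are a 2x2 Hadamard
 * transform, which is its own inverse up to a factor of 4; applying
 * dct2x2dc_dconly() twice returns 4x the input.  That is why the forward
 * and inverse chroma DC transforms can share the same arithmetic, with the
 * scale factor absorbed into the dequant shift. */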

static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[16], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[64], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}
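
/* Both wrappers above select between trellis quantization (RD-optimized
 * level decisions driven by the entropy coder's cost model) and plain
 * deadzone quantization.  A minimal sketch of the deadzone form, assuming
 * the fixed-point convention of the C reference quantizer (16-bit scale):
 *     level = ( (abs(coef) + bias) * mf ) >> 16, with the sign restored. */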

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */
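
/* For example, decimating an already-encoded luma 8x8 region reduces to the
 * sketch below (mirroring STORE_8x8_NNZ later in this file); the stale
 * coefficients left in h->dct are harmless because CBP/NNZ gate all reads:
 *     h->mb.i_cbp_luma &= ~(1 << idx);
 *     STORE_8x8_NNZ( idx, 0 );
 */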

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

#define STORE_8x8_NNZ(idx,nz)\
{\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] ) = nz * 0x0101;\
    M16( &h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] ) = nz * 0x0101;\
}

#define CLEAR_16x16_NNZ \
{\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = 0;\
    M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = 0;\
}
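
/* The multiplications above are byte broadcasts: nz is 0 or 1, so nz * 0x0101
 * replicates it into two adjacent bytes of the NNZ cache, letting one 16-bit
 * store cover two 4x4 blocks.  E.g. STORE_8x8_NNZ(1,1) marks luma blocks
 * 4..7 nonzero with two stores instead of four. */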

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ(idx,nz);
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ(idx,1);
    }
    else
        STORE_8x8_NNZ(idx,0);
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
    ALIGNED_ARRAY_16( int16_t, dct_dc4x4,[16] );

    int i, nz;
    int decimate_score = h->mb.b_dct_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[block_idx_xy_1d[i]] = dct4x4[i][0];
        dct4x4[i][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        CLEAR_16x16_NNZ
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );
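    /* The >>1 / <<1 pair above compensates for the extra gain of the 4x4 DC
     * Hadamard: halving mf while doubling bias is equivalent to quantizing
     * coef/2 with the original deadzone, matching the doubled dynamic range
     * of the DC-transformed coefficients. */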

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( i = 0; i < 16; i++ )
                dct4x4[i][0] = dct_dc4x4[block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}

static inline int idct_dequant_round_2x2_dc( int16_t ref[4], int16_t dct[4], int dequant_mf[6][16], int i_qp )
{
    int16_t out[4];
    idct_dequant_2x2_dconly( out, dct, dequant_mf, i_qp );
    return ((ref[0] ^ (out[0]+32))
          | (ref[1] ^ (out[1]+32))
          | (ref[2] ^ (out[2]+32))
          | (ref[3] ^ (out[3]+32))) >> 6;
}
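
/* The XOR/OR/shift above is a branchless comparison of rounded outputs:
 * (a ^ b) >> 6 is nonzero iff a and b differ somewhere above the low
 * rounding bits, i.e. iff (a >> 6) != (b >> 6).  The caller pre-adds the
 * +32 rounding term to ref[], so a zero return means the modified DC
 * coefficients still dequantize to exactly the same rounded values. */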

/* Round down coefficients losslessly in DC-only chroma blocks.
 * Unlike luma blocks, this can't be done with a lookup table or
 * other shortcut technique because of the interdependencies
 * between the coefficients due to the chroma DC transform. */
static inline int x264_mb_optimize_chroma_dc( x264_t *h, int b_inter, int i_qp, int16_t dct2x2[4] )
{
    int16_t dct2x2_orig[4];
    int coeff;
    int nz = 0;

    /* If the QP is too high, there's no benefit to rounding optimization. */
    if( h->dequant4_mf[CQM_4IC + b_inter][i_qp%6][0] << (i_qp/6) > 32*64 )
        return 1;

    idct_dequant_2x2_dconly( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
    dct2x2_orig[0] += 32;
    dct2x2_orig[1] += 32;
    dct2x2_orig[2] += 32;
    dct2x2_orig[3] += 32;

    /* If the DC coefficients already round to zero, terminate early. */
    if( !((dct2x2_orig[0]|dct2x2_orig[1]|dct2x2_orig[2]|dct2x2_orig[3])>>6) )
        return 0;

    /* Start with the highest frequency coefficient... is this the best option? */
    for( coeff = 3; coeff >= 0; coeff-- )
    {
        int sign = dct2x2[coeff] < 0 ? -1 : 1;
        int level = dct2x2[coeff];

        if( !level )
            continue;

        while( level )
        {
            dct2x2[coeff] = level - sign;
            if( idct_dequant_round_2x2_dc( dct2x2_orig, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp ) )
                break;
            level -= sign;
        }

        nz |= level;
        dct2x2[coeff] = level;
    }

    return !!nz;
}
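
/* Usage sketch: given quantized chroma DC levels {0,0,0,1} at high QP, the
 * loop above first tries lowering the last coefficient to 0; if
 * idct_dequant_round_2x2_dc() reports an unchanged rounded reconstruction,
 * the function returns 0 and the caller can drop the block's CBF entirely,
 * saving the cost of a coded block at no quality loss. */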

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch, nz, nz_dc;
    int b_decimate = b_inter && h->mb.b_dct_decimate;
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score  = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
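        /* A hedged reading of var2_8x8: it returns the variance of the chroma
         * residual and writes its SSD to *ssd.  Comparing the summed variance
         * against a lambda-derived threshold is a cheap proxy for the RD test
         * "would coding this residual cost more bits than it saves?" */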
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[25]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[26]] = 0;
            for( ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

                    if( nz_dc )
                    {
                        if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
                            continue;
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            if( !x264_mb_optimize_chroma_dc( h, b_inter, i_qp, dct2x2 ) )
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16, &h->sh.weight[0][0] );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        if( h->sh.weight[0][1].weightfn )
            h->sh.weight[0][1].weightfn[8>>2]( h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[1], FDEC_STRIDE,
                                               &h->sh.weight[0][1], 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );

        if( h->sh.weight[0][2].weightfn )
            h->sh.weight[0][2].weightfn[8>>2]( h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               h->mb.pic.p_fdec[2], FDEC_STRIDE,
                                               &h->sh.weight[0][2], 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as those of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */
/* This could be resolved by explicitly copying the edge pixels after doing the mc.copy, but that doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */
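
/* Sketch of the shortcut, modelled on x264_predict_lossless_16x16 below:
 * for vertical prediction the row above the macroblock is copied straight
 * from the *source* plane (p_fenc_plane - stride) rather than computed from
 * reconstructed pixels.  Lossless coding guarantees the two are identical,
 * so the prediction is bit-exact while avoiding any dependency on fdec. */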

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->mb.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,idx,nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        ALIGNED_ARRAY_16( uint8_t, edge,[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i8x8_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i8x8_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i8x8_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) = h->mb.pic.i4x4_nnz_buf[0];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 2]] ) = h->mb.pic.i4x4_nnz_buf[1];
            M32( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) = h->mb.pic.i4x4_nnz_buf[2];
            M32( &h->mb.cache.non_zero_count[x264_scan8[10]] ) = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                M32( &p_dst[4-FDEC_STRIDE] ) = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    STORE_8x8_NNZ(i8x8,nz);
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[4],[64] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                CLEAR_16x16_NNZ
            }
            else
            {
                for( idx = 0; idx < 4; idx++ )
                {
                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ(idx,1);
                    }
                    else
                        STORE_8x8_NNZ(idx,0);
                }
            }
        }
        else
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16],[16] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                        STORE_8x8_NNZ(i8x8,0)
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    CLEAR_16x16_NNZ
                }
                else
                {
                    for( i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: in the me perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mv give same result)*/
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            M32( h->mb.cache.mv[0][x264_scan8[0]] ) == M32( h->mb.cache.pskip_mv )
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
    ALIGNED_ARRAY_16( int16_t, dct2x2,[4] );
    ALIGNED_ARRAY_16( int16_t, dctscan,[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh, ssd;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16, &h->sh.weight[0][0] );
    }

    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );

            if( h->sh.weight[0][1+ch].weightfn )
                h->sh.weight[0][1+ch].weightfn[8>>2]( h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      h->mb.pic.p_fdec[1+ch], FDEC_STRIDE,
                                                      &h->sh.weight[0][1+ch], 8 );
        }

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        /* The vast majority of chroma checks will terminate during the DC check or the higher
         * threshold check, so we can save time by doing a DC-only DCT. */
        h->dctf.sub8x8_dct_dc( dct2x2, p_src, p_dst );

        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            /* We don't need to zero the DC coefficient before quantization because we already
             * checked that all the DCs were zero above at twice the precision that quant4x4
             * uses.  This applies even though the DC here is being quantized before the 2x2
             * transform. */
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}
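
/* A note on the structure above (illustrative): the early-out tests are
 * ordered by cost.  The luma decimate check rejects most non-skips, the
 * chroma SSD threshold skips nearly all chroma work, the DC-only 2x2 path
 * catches most of the remainder, and the full 4x4 quant + decimate pass
 * runs only for rare borderline macroblocks. */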

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}
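
/* A hedged reading of the update above:
 *     offset[i] ~= strength * count / (residual_sum[i] * weight[i] / 256)
 * i.e. coefficients whose average magnitude is small relative to the user's
 * i_noise_reduction strength receive a large extra deadzone, while strong
 * coefficients are left nearly untouched.  The periodic halving of the sums
 * keeps the statistics exponentially weighted toward recent blocks. */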

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * doesn't transform chroma dc.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->mb.b_dct_decimate;
    int nnz8x8 = 0;
    int ch, nz;

    if( !h->mb.b_skip_mc )
        x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ(i8,nnz8x8);
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                int nz;
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            int16_t dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            ALIGNED_ARRAY_16( int16_t, dct8x8,[64] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ(i8,1);
                }
                else
                    STORE_8x8_NNZ(i8,0);
            }
            else
                STORE_8x8_NNZ(i8,0);
        }
        else
        {
            int i4;
            int i_decimate_8x8 = 0;
            ALIGNED_ARRAY_16( int16_t, dct4x4,[4],[16] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ(i8,0);
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    int nz;

    /* Don't need motion compensation as this function is only used in qpel-RD, which caches pixel data. */

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        ALIGNED_ARRAY_16( int16_t, dct4x4,[16] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}