/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x][y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG

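/* The spec's chroma DC dequant is (dc * dequant_mf[i_qp%6][0][0] << i_qp/6) >> 5,
 * hence qbits = i_qp/6 - 5 below.  A shift by a negative amount is invalid in C,
 * so positive shifts are folded into dmf here, letting the users of this macro
 * uniformly apply ">> -qbits" afterwards. */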
#define IDCT_DEQUANT_START \
    int d0 = dct[0][0] + dct[0][1]; \
    int d1 = dct[1][0] + dct[1][1]; \
    int d2 = dct[0][0] - dct[0][1]; \
    int d3 = dct[1][0] - dct[1][1]; \
    int dmf = dequant_mf[i_qp%6][0][0]; \
    int qbits = i_qp/6 - 5; \
    if( qbits > 0 ) \
    { \
        dmf <<= qbits; \
        qbits = 0; \
    }

static inline void idct_dequant_2x2_dc( int16_t dct[2][2], int16_t dct4x4[4][4][4], int dequant_mf[6][4][4], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0][0] = (d0 + d1) * dmf >> -qbits;
    dct4x4[1][0][0] = (d0 - d1) * dmf >> -qbits;
    dct4x4[2][0][0] = (d2 + d3) * dmf >> -qbits;
    dct4x4[3][0][0] = (d2 - d3) * dmf >> -qbits;
}

static inline void idct_dequant_2x2_dconly( int16_t dct[2][2], int dequant_mf[6][4][4], int i_qp )
{
    IDCT_DEQUANT_START
    dct[0][0] = (d0 + d1) * dmf >> -qbits;
    dct[0][1] = (d0 - d1) * dmf >> -qbits;
    dct[1][0] = (d2 + d3) * dmf >> -qbits;
    dct[1][1] = (d2 - d3) * dmf >> -qbits;
}

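/* 2x2 Hadamard transform of the four chroma DC coefficients: {a,b;c,d} maps to
 * {a+b+c+d, a+b-c-d; a-b+c-d, a-b-c+d}.  Applying the same butterfly twice
 * scales the input by 4; that normalization is absorbed by the dequant shift
 * rather than applied here.  dct2x2dc also gathers the DCs from the four 4x4
 * subblocks and zeroes them there, so the AC quant path can run on dct4x4
 * unchanged. */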
static inline void dct2x2dc( int16_t d[2][2], int16_t dct4x4[4][4][4] )
{
    int d0 = dct4x4[0][0][0] + dct4x4[1][0][0];
    int d1 = dct4x4[2][0][0] + dct4x4[3][0][0];
    int d2 = dct4x4[0][0][0] - dct4x4[1][0][0];
    int d3 = dct4x4[2][0][0] - dct4x4[3][0][0];
    d[0][0] = d0 + d1;
    d[1][0] = d2 + d3;
    d[0][1] = d0 - d1;
    d[1][1] = d2 - d3;
    dct4x4[0][0][0] = 0;
    dct4x4[1][0][0] = 0;
    dct4x4[2][0][0] = 0;
    dct4x4[3][0][0] = 0;
}

static inline void dct2x2dc_dconly( int16_t d[2][2] )
{
    int d0 = d[0][0] + d[0][1];
    int d1 = d[1][0] + d[1][1];
    int d2 = d[0][0] - d[0][1];
    int d3 = d[1][0] - d[1][1];
    d[0][0] = d0 + d1;
    d[1][0] = d2 + d3;
    d[0][1] = d0 - d1;
    d[1][1] = d2 - d3;
}

static ALWAYS_INLINE int x264_quant_4x4( x264_t *h, int16_t dct[4][4], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        return x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, 0, idx );
    else
        return h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE int x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        return x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        return h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

/* All encoding functions must output the correct CBP and NNZ values.
 * The entropy coding functions will check CBP first, then NNZ, before
 * actually reading the DCT coefficients.  NNZ still must be correct even
 * if CBP is zero because of the use of NNZ values for context selection.
 * "NNZ" need only be 0 or 1 rather than the exact coefficient count because
 * that is only needed in CAVLC, and will be calculated by CAVLC's residual
 * coding and stored as necessary. */

/* This means that decimation can be done merely by adjusting the CBP and NNZ
 * rather than memsetting the coefficients. */

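/* For example, when x264_mb_encode_i16x16 below decides to decimate, it just
 * sets i_cbp_luma to 0 and zeroes the cached NNZ values; the quantized
 * coefficients left behind in h->dct are stale but harmless, as they will
 * never be read. */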
void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
        h->mb.i_cbp_luma |= nz<<(idx>>2);
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );
    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<(idx>>2);
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
}

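/* Store the NNZ flag for all four 4x4 subblocks of 8x8 block 'idx' at once.
 * nz is 0 or 1, so nz * 0x0101 replicates it into both bytes of each
 * 16-bit store, covering two scan8 cache entries per store. */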
#define STORE_8x8_NNZ(idx,nz)\
{\
    *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[idx*4+0]] = nz * 0x0101;\
    *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[idx*4+2]] = nz * 0x0101;\
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    int nz;
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        STORE_8x8_NNZ(idx,nz);
        h->mb.i_cbp_luma |= nz<<idx;
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    nz = x264_quant_8x8( h, dct8x8, i_qp, 1, idx );
    if( nz )
    {
        h->mb.i_cbp_luma |= 1<<idx;
        h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
        h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
        h->dctf.add8x8_idct8( p_dst, dct8x8 );
        STORE_8x8_NNZ(idx,1);
    }
    else
        STORE_8x8_NNZ(idx,0);
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct_dc4x4[4][4] );

    int i, nz;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || (h->param.analyse.b_dct_decimate && h->sh.i_type == SLICE_TYPE_P);
    int decimate_score = b_decimate ? 0 : 9;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[i], p_src+oe, p_dst+od, &dct_dc4x4[0][block_idx_yx_1d[i]] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( dct_dc4x4 );
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );

    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[0][block_idx_xy_1d[i]] = dct4x4[i][0][0];
        dct4x4[i][0][0] = 0;

        /* quant/scan/dequant */
        nz = x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );
        h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
            h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
            if( decimate_score < 6 ) decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[i] );
            h->mb.i_cbp_luma = 0xf;
        }
    }

    /* Writing the 16 CBFs in an i16x16 block is quite costly, so decimation can save many bits. */
    /* More useful with CAVLC, but still useful with CABAC. */
    if( decimate_score < 6 )
    {
        h->mb.i_cbp_luma = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0;
        *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0;
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        nz = x264_quant_dc_trellis( h, (int16_t*)dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1, 0 );
    else
        nz = h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );

    h->mb.cache.non_zero_count[x264_scan8[24]] = nz;
    if( nz )
    {
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

        /* output samples to fdec */
        h->dctf.idct4x4dc( dct_dc4x4 );
        h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */
        if( h->mb.i_cbp_luma )
            for( i = 0; i < 16; i++ )
                dct4x4[i][0][0] = dct_dc4x4[0][block_idx_xy_1d[i]];
    }

    /* put pixels to fdec */
    if( h->mb.i_cbp_luma )
        h->dctf.add16x16_idct( p_dst, dct4x4 );
    else if( nz )
        h->dctf.add16x16_idct_dc( p_dst, dct_dc4x4 );
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch, nz, nz_dc;
    int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate);
    DECLARE_ALIGNED_16( int16_t dct2x2[2][2]  );
    h->mb.i_cbp_chroma = 0;

    /* Early termination: check variance of chroma residual before encoding.
     * Don't bother trying early termination at low QPs.
     * Values are experimentally derived. */
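    /* thresh is roughly x264_lambda2_tab[i_qp]/64, so the cutoff rises with QP
     * together with the RD lambda. */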
    if( b_decimate && i_qp >= (h->mb.b_trellis ? 12 : 18) )
    {
        int thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
        int ssd[2];
        int score  = h->pixf.var2_8x8( h->mb.pic.p_fenc[1], FENC_STRIDE, h->mb.pic.p_fdec[1], FDEC_STRIDE, &ssd[0] );
            score += h->pixf.var2_8x8( h->mb.pic.p_fenc[2], FENC_STRIDE, h->mb.pic.p_fdec[2], FDEC_STRIDE, &ssd[1] );
        if( score < thresh*4 )
        {
            h->mb.cache.non_zero_count[x264_scan8[16]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[17]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[18]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[19]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[20]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[21]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[22]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[23]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[25]] = 0;
            h->mb.cache.non_zero_count[x264_scan8[26]] = 0;
            for( ch = 0; ch < 2; ch++ )
            {
                if( ssd[ch] > thresh )
                {
                    h->dctf.sub8x8_dct_dc( dct2x2, h->mb.pic.p_fenc[1+ch], h->mb.pic.p_fdec[1+ch] );
                    dct2x2dc_dconly( dct2x2 );
                    if( h->mb.b_trellis )
                        nz_dc = x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
                    else
                        nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );
                    if( nz_dc )
                    {
                        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
                        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                        idct_dequant_2x2_dconly( dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                        h->dctf.add8x8_idct_dc( h->mb.pic.p_fdec[1+ch], dct2x2 );
                        h->mb.i_cbp_chroma = 1;
                    }
                }
            }
            return;
        }
    }

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;
        int nz_ac = 0;

        DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od, &h->dct.chroma_dc[ch][i] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
            if( nz )
            {
                nz_ac = 1;
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
                if( b_decimate )
                    i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
            }
        }

        if( h->mb.b_trellis )
            nz_dc = x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter, 1 );
        else
            nz_dc = h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

        h->mb.cache.non_zero_count[x264_scan8[25]+ch] = nz_dc;

        if( (b_decimate && i_decimate_score < 7) || !nz_ac )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !nz_dc ) /* Whole block is empty */
                continue;
            /* DC-only */
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            h->mb.i_cbp_chroma = 1;
            if( nz_dc )
            {
                zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
                idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as those of the source frame.  They
 * therefore only work correctly if the neighboring blocks are losslessly coded; in practice, lossless
 * mode cannot be mixed with lossy mode within a frame. */
/* This could be resolved by explicitly copying the edge pixels after the mc.copy, but that isn't
 * necessary unless we decide to allow mixing lossless and lossy compression. */

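/* For example, vertical prediction is an exact copy of the row of pixels above
 * the block, so copying that row from the source plane gives identical results
 * as long as the block above was itself coded losslessly. */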
void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,idx,nz;
    h->mb.i_cbp_luma = 0;
    h->mb.cache.non_zero_count[x264_scan8[24]] = 0;

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        DECLARE_ALIGNED_16( uint8_t edge[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = h->mb.pic.i8x8_nnz_buf[0];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = h->mb.pic.i8x8_nnz_buf[1];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = h->mb.pic.i8x8_nnz_buf[2];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = h->mb.pic.i8x8_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i8x8_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            h->predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = h->mb.pic.i4x4_nnz_buf[0];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = h->mb.pic.i4x4_nnz_buf[1];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = h->mb.pic.i4x4_nnz_buf[2];
            *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = h->mb.pic.i4x4_nnz_buf[3];
            h->mb.i_cbp_luma = h->mb.pic.i4x4_cbp;
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                *(uint32_t*) &p_dst[4-FDEC_STRIDE] = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    nz = h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    STORE_8x8_NNZ(i8x8,nz);
                    h->mb.i_cbp_luma |= nz << i8x8;
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                    h->mb.cache.non_zero_count[x264_scan8[i4x4]] = nz;
                    h->mb.i_cbp_luma |= nz << (i4x4>>2);
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( *dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                nz = x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                if( nz )
                {
                    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );
                    if( b_decimate )
                    {
                        int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                        i_decimate_mb += i_decimate_8x8;
                        if( i_decimate_8x8 >= 4 )
                            h->mb.i_cbp_luma |= 1<<idx;
                    }
                    else
                        h->mb.i_cbp_luma |= 1<<idx;
                }
            }

            if( i_decimate_mb < 6 && b_decimate )
            {
                h->mb.i_cbp_luma = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0;
                *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0;
            }
            else
            {
                for( idx = 0; idx < 4; idx++ )
                {
                    if( h->mb.i_cbp_luma&(1<<idx) )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                        STORE_8x8_NNZ(idx,1);
                    }
                    else
                        STORE_8x8_NNZ(idx,0);
                }
            }
        }
        else
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8 = 0;
                int cbp = 0;

                /* encode one 4x4 block */
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( *dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    nz = x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );
                    h->mb.cache.non_zero_count[x264_scan8[idx]] = nz;

                    if( nz )
                    {
                        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );
                        h->quantf.dequant_4x4( dct4x4[idx], h->dequant4_mf[CQM_4PY], i_qp );
                        if( b_decimate && i_decimate_8x8 < 6 )
                            i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                        cbp = 1;
                    }
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( b_decimate )
                {
                    if( i_decimate_8x8 < 4 )
                        STORE_8x8_NNZ(i8x8,0)
                    else
                        h->mb.i_cbp_luma |= 1<<i8x8;
                }
                else if( cbp )
                {
                    h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    h->mb.i_cbp_luma |= 1<<i8x8;
                }
            }

            if( b_decimate )
            {
                if( i_decimate_mb < 6 )
                {
                    h->mb.i_cbp_luma = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 0]] = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 2]] = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[ 8]] = 0;
                    *(uint32_t*)&h->mb.cache.non_zero_count[x264_scan8[10]] = 0;
                }
                else
                {
                    for( i8x8 = 0; i8x8 < 4; i8x8++ )
                        if( h->mb.i_cbp_luma&(1<<i8x8) )
                            h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
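    /* cbp layout: bits 0-3 = luma CBP, bits 4-5 = chroma CBP,
     * bits 8-10 = DC NNZ flags (luma DC, U DC, V DC; CABAC only). */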
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP.
     * XXX: in the ME, perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple MVs give the same result). */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            *(uint32_t*)h->mb.cache.mv[0][x264_scan8[0]] == *(uint32_t*)h->mb.cache.pskip_mv
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check whether the current MB could be encoded as a [PB]_SKIP
 *  (assumes the previous QP is used).
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2x2[2][2] );
    DECLARE_ALIGNED_16( int16_t dctscan[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh, ssd;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16 );
    }

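    /* Luma check: give up on the skip as soon as the accumulated decimate score
     * reaches 6, the same threshold below which macroblock_encode decimates a
     * residual; a residual scoring higher is too costly to throw away. */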
    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );
        }

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        ssd = h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE );
        if( ssd < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2dc( dct2x2, dct4x4 );
        if( h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 ) )
            return 0;

        /* If there wasn't a termination in DC, we can check against a much higher threshold. */
        if( ssd < thresh*4 )
            continue;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            if( !h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] ) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

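/* Per-coefficient deadzone offsets are derived from running magnitude sums:
 * offset[i] = (strength*count + sum[i]/2) / (sum[i]*weight[i]/256 + 1), so
 * coefficients whose average magnitude is small (likely noise) receive a
 * larger offset and are pushed more aggressively toward zero.  The running
 * totals are periodically halved so the statistics adapt over time. */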
void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; four calls to this do not add up to one macroblock_encode:
 * it doesn't transform the chroma DC.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int nnz8x8 = 0;
    int ch, nz;

    x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            nnz8x8 = h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            STORE_8x8_NNZ(i8,nnz8x8);
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                int nz;
                nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            int16_t dc;
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            nz = h->zigzagf.sub_4x4ac( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec, &dc );
            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            nnz8x8 = x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            if( nnz8x8 )
            {
                h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

                if( b_decimate && !h->mb.b_trellis )
                    nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );

                if( nnz8x8 )
                {
                    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                    h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                    STORE_8x8_NNZ(i8,1);
                }
                else
                    STORE_8x8_NNZ(i8,0);
            }
            else
                STORE_8x8_NNZ(i8,0);
        }
        else
        {
            int i4;
            int i_decimate_8x8 = 0;
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
            {
                nz = x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );
                h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = nz;
                if( nz )
                {
                    h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                    if( b_decimate )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                    nnz8x8 = 1;
                }
            }

            if( b_decimate && i_decimate_8x8 < 4 )
                nnz8x8 = 0;

            if( nnz8x8 )
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            else
                STORE_8x8_NNZ(i8,0);
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0][0] = 0;

            if( h->mb.b_trellis )
                nz = x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 1, 0 );
            else
                nz = h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = nz;
            if( nz )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    const int i_ref = h->mb.cache.ref[0][x264_scan8[i4]];
    const int mvx   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][0], h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][1], h->mb.mv_min[1], h->mb.mv_max[1] );
    int nz;

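    /* MVs are in quarter-pel units, so the block's pixel offset
     * (4 pixels per block index) is scaled by 4 before being added. */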
    h->mc.mc_luma( p_fdec, FDEC_STRIDE, h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], mvx + 4*4*block_idx_x[i4], mvy + 4*4*block_idx_y[i4], 4, 4 );

    if( h->mb.b_lossless )
    {
        nz = h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
    }
    else
    {
        DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        nz = x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
        if( nz )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
        }
    }
}