/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

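/* Scan the 2x2 chroma DC block: flatten the four DC coefficients from the
 * transform's 2-D layout into the 1-D level array used by the entropy coder
 * (for a 2x2 block the "zigzag" order is trivial). */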
#define ZIG(i,y,x) level[i] = dct[x][y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG

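/* Quantization dispatch: with trellis enabled, coefficient levels are chosen
 * by RD-optimized (trellis) quantization; otherwise the plain deadzone
 * quantizer from the function table is used. */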
static ALWAYS_INLINE void x264_quant_4x4( x264_t *h, int16_t dct[4][4], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, idx );
    else
        h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE void x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

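/* Encode one intra 4x4 luma block: residual DCT, quant, zigzag scan for the
 * entropy coder, then dequant+IDCT back into fdec so that subsequent blocks
 * predict from reconstructed pixels. Lossless mode bypasses the transform
 * and zigzags the raw residual directly. */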
void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );

    if( array_non_zero( dct4x4 ) )
    {
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );

        /* output samples to fdec */
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
    else
        memset( h->dct.luma4x4[idx], 0, sizeof(h->dct.luma4x4[idx]));
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    x264_quant_8x8( h, dct8x8, i_qp, 1, idx );

    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
    h->dctf.add8x8_idct8( p_dst, dct8x8 );
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct_dc4x4[4][4] );

    int i;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            h->zigzagf.sub_4x4( h->dct.luma4x4[i], p_src+oe, p_dst+od );
            dct_dc4x4[0][block_idx_yx_1d[i]] = h->dct.luma4x4[i][0];
            h->dct.luma4x4[i][0] = 0;
        }
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[0][block_idx_xy_1d[i]] = dct4x4[i][0][0];
        dct4x4[i][0][0] = 0;

        /* quant/scan/dequant */
        x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );

        h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
        h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
    }

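    /* The DC coefficients get an extra 4x4 Hadamard transform. Its output is
     * scaled up relative to the AC path, which is presumably why the quant
     * multiplier is halved (>>1) and the bias doubled (<<1) below. */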
    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        x264_quant_dc_trellis( h, (int16_t*)dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1);
    else
        h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );
    h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

    /* output samples to fdec */
    h->dctf.idct4x4dc( dct_dc4x4 );
    x264_mb_dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */

    /* calculate dct coeffs */
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct4x4[i][0][0] = dct_dc4x4[0][block_idx_xy_1d[i]];
    }
    /* put pixels to fdec */
    h->dctf.add16x16_idct( p_dst, dct4x4 );
}

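/* Encode both chroma 8x8 blocks. decimate_score15 rates the quantized AC
 * coefficients of each 4x4 block; a small total means the block costs bits
 * while contributing almost nothing, so the whole 8x8 may be zeroed below. */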
void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch;
    int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate);

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;

        DECLARE_ALIGNED_16( int16_t dct2x2[2][2]  );
        DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                h->zigzagf.sub_4x4( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od );
                h->dct.chroma_dc[ch][i] = h->dct.luma4x4[16+i+ch*4][0];
                h->dct.luma4x4[16+i+ch*4][0] = 0;
            }
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            /* copy dc coeff */
            dct2x2[i>>1][i&1] = dct4x4[i][0][0];
            dct4x4[i][0][0] = 0;

            if( h->mb.b_trellis )
                x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 0 );
            else
                h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );

            if( b_decimate )
                i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
        }

        h->dctf.dct2x2dc( dct2x2 );
        if( h->mb.b_trellis )
            x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter );
        else
            h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );
        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );

        /* output samples to fdec */
        h->dctf.idct2x2dc( dct2x2 );
        x264_mb_dequant_2x2_dc( dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );  /* XXX not inversed */

        if( b_decimate && i_decimate_score < 7 )
        {
            /* Near-null chroma 8x8 block: null it entirely to save bits */
            memset( &h->dct.luma4x4[16+ch*4], 0, 4 * sizeof( *h->dct.luma4x4 ) );
            if( !array_non_zero( dct2x2 ) )
                continue;
            memset( dct4x4, 0, sizeof( dct4x4 ) );
        }
        else
        {
            for( i = 0; i < 4; i++ )
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
        }
        dct4x4[0][0][0] = dct2x2[0][0];
        dct4x4[1][0][0] = dct2x2[0][1];
        dct4x4[2][0][0] = dct2x2[1][0];
        dct4x4[3][0][0] = dct2x2[1][1];
        h->dctf.add8x8_idct( p_dst, dct4x4 );
    }

    /* coded block pattern */
    h->mb.i_cbp_chroma = 0;
    for( i = 0; i < 8; i++ )
    {
        int nz = array_non_zero( h->dct.luma4x4[16+i] );
        h->mb.cache.non_zero_count[x264_scan8[16+i]] = nz;
        h->mb.i_cbp_chroma |= nz;
    }
    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( array_non_zero( h->dct.chroma_dc ) )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as that of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */
/* This can be resolved by explicitly copying the edge pixels after doing the mc.copy, but this doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,j,idx;
    uint8_t nnz8x8[4] = {1,1,1,1};

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        DECLARE_ALIGNED_16( uint8_t edge[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            x264_predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
        for( i = 0; i < 4; i++ )
            nnz8x8[i] = array_non_zero( h->dct.luma8x8[i] );
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                *(uint32_t*) &p_dst[4-FDEC_STRIDE] = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    nnz8x8[i8x8] = array_non_zero( h->dct.luma8x8[i8x8] );
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( *dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );

                if( b_decimate )
                {
                    int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                    i_decimate_mb += i_decimate_8x8;
                    if( i_decimate_8x8 < 4 )
                        nnz8x8[idx] = 0;
                }
                else
                    nnz8x8[idx] = array_non_zero( dct8x8[idx] );
            }

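            /* Whole-MB decimation: if the summed decimate score of all four
             * 8x8 blocks is still tiny, zeroing the entire macroblock is
             * judged cheaper than coding the few remaining coefficients. */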
            if( i_decimate_mb < 6 && b_decimate )
                *(uint32_t*)nnz8x8 = 0;
            else
            {
                for( idx = 0; idx < 4; idx++ )
                    if( nnz8x8[idx] )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                    }
            }
        }
        else
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8;

                /* encode one 4x4 block */
                i_decimate_8x8 = 0;
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( *dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );

                    h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );

                    if( b_decimate && i_decimate_8x8 <= 6 )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( i_decimate_8x8 < 4 && b_decimate )
                    nnz8x8[i8x8] = 0;
            }

            if( i_decimate_mb < 6 && b_decimate )
                *(uint32_t*)nnz8x8 = 0;
            else
            {
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                    if( nnz8x8[i8x8] )
                    {
                        for( i = 0; i < 4; i++ )
                            h->quantf.dequant_4x4( dct4x4[i8x8*4+i], h->dequant4_mf[CQM_4PY], i_qp );
                        h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* coded block pattern and non_zero_count */
    h->mb.i_cbp_luma = 0x00;
    if( h->mb.i_type == I_16x16 )
    {
        for( i = 0; i < 16; i++ )
        {
            int nz = array_non_zero( h->dct.luma4x4[i] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
    }
    else
    {
        for( i = 0; i < 4; i++)
        {
            if(!nnz8x8[i])
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[0+i*4]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[2+i*4]] = 0;
            }
            else if( h->mb.b_transform_8x8 )
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[0+4*i]] = nnz8x8[i] * 0x0101;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[2+4*i]] = nnz8x8[i] * 0x0101;
                h->mb.i_cbp_luma |= nnz8x8[i] << i;
            }
            else
            {
                int nz, cbp = 0;
                for( j = 0; j < 4; j++ )
                {
                    nz = array_non_zero( h->dct.luma4x4[j+4*i] );
                    h->mb.cache.non_zero_count[x264_scan8[j+4*i]] = nz;
                    cbp |= nz;
                }
                h->mb.i_cbp_luma |= cbp << i;
            }
        }
    }

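    /* For CABAC, DC coefficients are coded as separate blocks, so keep a
     * DC-only CBP: bit 0 = i16x16 luma DC, bits 1-2 = the two chroma DCs. */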
    if( h->param.b_cabac )
    {
        i_cbp_dc = ( h->mb.i_type == I_16x16 && array_non_zero( h->dct.luma16x16_dc ) )
                 | array_non_zero( h->dct.chroma_dc[0] ) << 1
                 | array_non_zero( h->dct.chroma_dc[1] ) << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: during motion estimation we should perhaps take x264_mb_predict_mv_pskip
     *      into account (if multiple MVs give the same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            *(uint32_t*)h->mb.cache.mv[0][x264_scan8[0]] == *(uint32_t*)h->mb.cache.pskip_mv
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check whether the current MB could be encoded as a [PB]_SKIP
 *  (assumes the previous QP is used)
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, const int b_bidir )
{
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2x2[2][2] );
    DECLARE_ALIGNED_16( int16_t dctscan[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16 );
    }

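    /* The MB qualifies as SKIP only if every quantized luma block decimates
     * to (near) zero after MC from the predicted MV; bail out as soon as the
     * accumulated score proves otherwise. */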
    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] );
            if( !array_non_zero(dct4x4[i4x4]) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );
        }

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        if( h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2[0][0] = dct4x4[0][0][0];
        dct2x2[0][1] = dct4x4[1][0][0];
        dct2x2[1][0] = dct4x4[2][0][0];
        dct2x2[1][1] = dct4x4[3][0][0];
        h->dctf.dct2x2dc( dct2x2 );
        h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 );
        if( array_non_zero(dct2x2) )
            return 0;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            dct4x4[i4x4][0][0] = 0;
            h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );
            if( !array_non_zero(dct4x4[i4x4]) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

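/* Periodically halve the accumulated statistics so the estimate tracks recent
 * frames, then derive a per-coefficient deadzone offset: the smaller the
 * average residual energy of a coefficient (relative to its quant weight),
 * the more likely it is noise, and the larger the offset pushing it to zero. */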
void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; 4 calls to this are not equivalent to one macroblock_encode.
 * Doesn't transform chroma DC.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int nnz8x8 = 0;
    int ch;

    x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            nnz8x8 = array_non_zero( h->dct.luma8x8[i8] );
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                nnz8x8 |= array_non_zero( h->dct.luma4x4[i4] );
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            h->zigzagf.sub_4x4( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec );
            h->dct.luma4x4[16+i8+ch*4][0] = 0;
        }
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

            if( b_decimate && !h->mb.b_trellis )
                nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );
            else
                nnz8x8 = array_non_zero( dct8x8 );

            if( nnz8x8 )
            {
                h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                h->dctf.add8x8_idct8( p_fdec, dct8x8 );
            }
        }
        else
        {
            int i4;
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
                x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );

            for( i4 = 0; i4 < 4; i4++ )
                h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );

            if( b_decimate )
            {
                int i_decimate_8x8 = 0;
                for( i4 = 0; i4 < 4 && i_decimate_8x8 < 4; i4++ )
                    i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                nnz8x8 = 4 <= i_decimate_8x8;
            }
            else
                nnz8x8 = array_non_zero( dct4x4 );

            if( nnz8x8 )
            {
                for( i4 = 0; i4 < 4; i4++ )
                    h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            }
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0][0] = 0;

            if( h->mb.b_trellis )
                x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 0 );
            else
                h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
            if( array_non_zero( dct4x4 ) )
            {
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
            }
        }
    }
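    /* Chroma DC was not retransformed above, so instead of rederiving the
     * chroma CBP it is simply forced to 2 (DC+AC present). */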
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}