/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x][y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG

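/* Inverse 2x2 Hadamard on the chroma DC block, fused with dequantization;
 * the d0..d3 butterflies below are one reading of the 2x2 transform as two
 * folded 1-D passes.  Note the qbits clamp: at high QPs the dequant scale is
 * pre-shifted left so the final right shift is never negative (a negative
 * shift count would be undefined behavior in C). */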
static inline void idct_dequant_2x2_dc( int16_t dct[2][2], int16_t dct4x4[4][4][4], int dequant_mf[6][4][4], int i_qp )
{
    int d0 = dct[0][0] + dct[0][1];
    int d1 = dct[1][0] + dct[1][1];
    int d2 = dct[0][0] - dct[0][1];
    int d3 = dct[1][0] - dct[1][1];
    int dmf = dequant_mf[i_qp%6][0][0];
    int qbits = i_qp/6 - 5;
    if( qbits > 0 )
    {
        dmf <<= qbits;
        qbits = 0;
    }
    dct4x4[0][0][0] = (d0 + d1) * dmf >> -qbits;
    dct4x4[1][0][0] = (d0 - d1) * dmf >> -qbits;
    dct4x4[2][0][0] = (d2 + d3) * dmf >> -qbits;
    dct4x4[3][0][0] = (d2 - d3) * dmf >> -qbits;
}

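/* Forward 2x2 Hadamard over the DC terms of the four chroma 4x4 blocks.
 * The DCs are gathered into d[][] and then zeroed in dct4x4[] so that the AC
 * coefficients can be quantized and coded separately from the DC block. */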
static inline void dct2x2dc( int16_t d[2][2], int16_t dct4x4[4][4][4] )
{
    int d0 = dct4x4[0][0][0] + dct4x4[1][0][0];
    int d1 = dct4x4[2][0][0] + dct4x4[3][0][0];
    int d2 = dct4x4[0][0][0] - dct4x4[1][0][0];
    int d3 = dct4x4[2][0][0] - dct4x4[3][0][0];
    d[0][0] = d0 + d1;
    d[1][0] = d2 + d3;
    d[0][1] = d0 - d1;
    d[1][1] = d2 - d3;
    dct4x4[0][0][0] = 0;
    dct4x4[1][0][0] = 0;
    dct4x4[2][0][0] = 0;
    dct4x4[3][0][0] = 0;
}

static ALWAYS_INLINE void x264_quant_4x4( x264_t *h, int16_t dct[4][4], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, idx );
    else
        h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE void x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );

    if( array_non_zero( dct4x4 ) )
    {
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );

        /* output samples to fdec */
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
    else
        memset( h->dct.luma4x4[idx], 0, sizeof(h->dct.luma4x4[idx]));
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    x264_quant_8x8( h, dct8x8, i_qp, 1, idx );

    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
    h->dctf.add8x8_idct8( p_dst, dct8x8 );
}

static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct_dc4x4[4][4] );

    int i;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            h->zigzagf.sub_4x4( h->dct.luma4x4[i], p_src+oe, p_dst+od );
            dct_dc4x4[0][block_idx_yx_1d[i]] = h->dct.luma4x4[i][0];
            h->dct.luma4x4[i][0] = 0;
        }
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

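    /* Per H.264 Intra_16x16 coding: transform all 16 luma 4x4 blocks, pull
     * each block's DC term out into dct_dc4x4, and code the DCs with a
     * second-level 4x4 Hadamard.  The per-block quant below therefore runs
     * on AC coefficients only (DCT_LUMA_AC). */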
    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[0][block_idx_xy_1d[i]] = dct4x4[i][0][0];
        dct4x4[i][0][0] = 0;

        /* quant/scan/dequant */
        x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );

        h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
        h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
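    /* The DC Hadamard path carries an extra scaling factor relative to the
     * AC path; halving the quant multiplier and doubling the rounding bias
     * below appears to fold that normalization into the quant step (a
     * reading of the code, not a spec citation).  The same >>1 / <<1 trick
     * is used for chroma DC. */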
    if( h->mb.b_trellis )
        x264_quant_dc_trellis( h, (int16_t*)dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1);
    else
        h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );
    h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

    /* output samples to fdec */
    h->dctf.idct4x4dc( dct_dc4x4 );
    h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inversed */

    /* calculate dct coeffs */
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct4x4[i][0][0] = dct_dc4x4[0][block_idx_xy_1d[i]];
    }
    /* put pixels to fdec */
    h->dctf.add16x16_idct( p_dst, dct4x4 );
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch;
    int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate);

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;

        DECLARE_ALIGNED_16( int16_t dct2x2[2][2]  );
        DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                h->zigzagf.sub_4x4( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od );
                h->dct.chroma_dc[ch][i] = h->dct.luma4x4[16+i+ch*4][0];
                h->dct.luma4x4[16+i+ch*4][0] = 0;
            }
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 0 );
            else
                h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );

            if( b_decimate )
                i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
        }

        if( h->mb.b_trellis )
            x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter );
        else
            h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

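        /* The decimate score is a cost heuristic over the quantized AC
         * coefficients: small, isolated coefficients score low.  A total
         * under 7 suggests the bits spent coding this chroma 8x8 would
         * outweigh the quality gained, so the whole block is dropped. */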
        if( b_decimate && i_decimate_score < 7 )
        {
            /* Near-null chroma 8x8 block, so zero it entirely (saves bits) */
            memset( &h->dct.luma4x4[16+ch*4], 0, 4 * sizeof( *h->dct.luma4x4 ) );
            if( !array_non_zero( dct2x2 ) )
            {
                memset( h->dct.chroma_dc[ch], 0, sizeof( h->dct.chroma_dc[ch] ) );
                continue;
            }
            memset( dct4x4, 0, sizeof( dct4x4 ) );
        }
        else
        {
            for( i = 0; i < 4; i++ )
                h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
        }

        zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
        idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
        h->dctf.add8x8_idct( p_dst, dct4x4 );
    }

    /* coded block pattern */
    h->mb.i_cbp_chroma = 0;
    for( i = 0; i < 8; i++ )
    {
        int nz = array_non_zero( h->dct.luma4x4[16+i] );
        h->mb.cache.non_zero_count[x264_scan8[16+i]] = nz;
        h->mb.i_cbp_chroma |= nz;
    }
    h->mb.cache.non_zero_count[x264_scan8[25]] = array_non_zero( h->dct.chroma_dc[0] );
    h->mb.cache.non_zero_count[x264_scan8[26]] = array_non_zero( h->dct.chroma_dc[1] );
    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as that of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, this means
 * lossless mode cannot be mixed with lossy mode within a frame. */
/* This can be resolved by explicitly copying the edge pixels after doing the mc.copy, but this doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,j,idx;
    uint8_t nnz8x8[4] = {1,1,1,1};
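    /* nnz8x8 tracks, per 8x8 block, whether any quantized luma coefficient
     * survived; it feeds the luma CBP and the non_zero_count cache at the
     * end of this function.  It defaults to "nonzero" and is cleared by
     * decimation. */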

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        DECLARE_ALIGNED_16( uint8_t edge[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            x264_predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
        for( i = 0; i < 4; i++ )
            nnz8x8[i] = array_non_zero( h->dct.luma8x8[i] );
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                *(uint32_t*) &p_dst[4-FDEC_STRIDE] = p_dst[3-FDEC_STRIDE] * 0x01010101U;
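            /* Multiplying the byte by 0x01010101U splats the last available
             * top pixel across all four missing top-right positions in one
             * 32-bit store, matching the spec's edge-extension rule. */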

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    nnz8x8[i8x8] = array_non_zero( h->dct.luma8x8[i8x8] );
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( *dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );

                if( b_decimate )
                {
                    int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                    i_decimate_mb += i_decimate_8x8;
                    if( i_decimate_8x8 < 4 )
                        nnz8x8[idx] = 0;
                }
                else
                    nnz8x8[idx] = array_non_zero( dct8x8[idx] );
            }

            if( i_decimate_mb < 6 && b_decimate )
                *(uint32_t*)nnz8x8 = 0;
            else
            {
                for( idx = 0; idx < 4; idx++ )
                    if( nnz8x8[idx] )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                    }
            }
        }
        else
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8;

                /* encode the four 4x4 blocks of this 8x8 */
                i_decimate_8x8 = 0;
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( *dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );

                    h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );

                    if( b_decimate && i_decimate_8x8 < 6 )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( i_decimate_8x8 < 4 && b_decimate )
                    nnz8x8[i8x8] = 0;
            }

            if( i_decimate_mb < 6 && b_decimate )
                *(uint32_t*)nnz8x8 = 0;
            else
            {
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                    if( nnz8x8[i8x8] )
                    {
                        for( i = 0; i < 4; i++ )
                            h->quantf.dequant_4x4( dct4x4[i8x8*4+i], h->dequant4_mf[CQM_4PY], i_qp );
                        h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* coded block pattern and non_zero_count */
    h->mb.i_cbp_luma = 0x00;
    if( h->mb.i_type == I_16x16 )
    {
        for( i = 0; i < 16; i++ )
        {
            int nz = array_non_zero( h->dct.luma4x4[i] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( h->dct.luma16x16_dc );
    }
    else
    {
        for( i = 0; i < 4; i++)
        {
            if(!nnz8x8[i])
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[0+i*4]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[2+i*4]] = 0;
            }
            else if( h->mb.b_transform_8x8 )
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[0+4*i]] = nnz8x8[i] * 0x0101;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[2+4*i]] = nnz8x8[i] * 0x0101;
                h->mb.i_cbp_luma |= nnz8x8[i] << i;
            }
            else
            {
                int nz, cbp = 0;
                for( j = 0; j < 4; j++ )
                {
                    nz = array_non_zero( h->dct.luma4x4[j+4*i] );
                    h->mb.cache.non_zero_count[x264_scan8[j+4*i]] = nz;
                    cbp |= nz;
                }
                h->mb.i_cbp_luma |= cbp << i;
            }
        }
        h->mb.cache.non_zero_count[x264_scan8[24]] = 0;
    }

    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: during motion estimation, perhaps we should take
     *      x264_mb_predict_mv_pskip into account (if multiple MVs give the same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            *(uint32_t*)h->mb.cache.mv[0][x264_scan8[0]] == *(uint32_t*)h->mb.cache.pskip_mv
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *  (this assumes the previous QP is used)
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2x2[2][2] );
    DECLARE_ALIGNED_16( int16_t dctscan[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16 );
    }

    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* quantize and check each 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] );
            if( !array_non_zero(dct4x4[i4x4]) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;
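    /* thresh scales the rate-distortion lambda for this QP into an SSD
     * threshold: below it, the chroma residual is cheap enough that the
     * full coefficient check can be skipped (see the comment in the loop). */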

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );
        }

        /* there is almost never a termination during chroma, but we can't avoid the check entirely */
        /* so instead we check SSD and skip the actual check if the score is low enough. */
        if( h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2dc( dct2x2, dct4x4 );
        h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 );
        if( array_non_zero(dct2x2) )
            return 0;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );
            if( !array_non_zero(dct4x4[i4x4]) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

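        /* Each offset is roughly (strength * count) / (mean residual energy):
         * coefficients that are usually small get a large deadzone offset and
         * are pushed toward zero, while consistently large coefficients are
         * left mostly untouched.  The +residual_sum/2 and +1 terms are
         * rounding/guard values; this mirrors the libavcodec heuristic. */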
        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * doesn't transform chroma dc.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int nnz8x8 = 0;
    int ch;

    x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            nnz8x8 = array_non_zero( h->dct.luma8x8[i8] );
            *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0x0101 * nnz8x8;
            *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0x0101 * nnz8x8;
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                int nz;
                h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                nz = array_non_zero( h->dct.luma4x4[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            h->zigzagf.sub_4x4( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec );
            h->dct.luma4x4[16+i8+ch*4][0] = 0;
        }
        h->mb.cache.non_zero_count[x264_scan8[16+i8]] = array_non_zero( h->dct.luma4x4[16+i8] );
        h->mb.cache.non_zero_count[x264_scan8[20+i8]] = array_non_zero( h->dct.luma4x4[20+i8] );
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

            if( b_decimate && !h->mb.b_trellis )
                nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );
            else
                nnz8x8 = array_non_zero( dct8x8 );

            if( nnz8x8 )
            {
                h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0x0101;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0x0101;
            }
            else
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0;
            }
        }
        else
        {
            int i4;
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
                x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );

            for( i4 = 0; i4 < 4; i4++ )
                h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );

            if( b_decimate )
            {
                int i_decimate_8x8 = 0;
                for( i4 = 0; i4 < 4 && i_decimate_8x8 < 4; i4++ )
                    i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                nnz8x8 = 4 <= i_decimate_8x8;
            }
            else
                nnz8x8 = array_non_zero( dct4x4 );

            if( nnz8x8 )
            {
                for( i4 = 0; i4 < 4; i4++ )
                {
                    if( array_non_zero( dct4x4[i4] ) )
                    {
                        h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                        h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = 1;
                    }
                    else
                        h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = 0;
                }
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            }
            else
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0;
            }
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0][0] = 0;

            if( h->mb.b_trellis )
                x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 0 );
            else
                h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            if( array_non_zero( dct4x4 ) )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
                h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = 1;
            }
            else
                h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = 0;
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    const int i_ref = h->mb.cache.ref[0][x264_scan8[i4]];
    const int mvx   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][0], h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][1], h->mb.mv_min[1], h->mb.mv_max[1] );

    h->mc.mc_luma( p_fdec, FDEC_STRIDE, h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], mvx + 4*4*block_idx_x[i4], mvy + 4*4*block_idx_y[i4], 4, 4 );
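    /* The MV is in quarter-pel units, so the 4x4 block's pixel offset
     * (block_idx_x*4 pixels) is scaled by another factor of 4 before being
     * added to the MV; mc_luma then compensates just this one block. */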

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = array_non_zero( h->dct.luma4x4[i4] );
    }
    else
    {
        DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        if( array_non_zero( dct4x4 ) )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
            h->mb.cache.non_zero_count[x264_scan8[i4]] = 1;
        }
        else
            h->mb.cache.non_zero_count[x264_scan8[i4]] = 0;
    }
}