/*****************************************************************************
 * macroblock.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02111, USA.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"

/* These chroma DC functions don't have assembly versions and are only used here. */

#define ZIG(i,y,x) level[i] = dct[x][y];
static inline void zigzag_scan_2x2_dc( int16_t level[4], int16_t dct[2][2] )
{
    ZIG(0,0,0)
    ZIG(1,0,1)
    ZIG(2,1,0)
    ZIG(3,1,1)
}
#undef ZIG
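/* The resulting scan order, in (y,x) terms, is (0,0), (0,1), (1,0), (1,1):
 * for a 2x2 block the zigzag and raster orders coincide. */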

#define IDCT_DEQUANT_START \
    int d0 = dct[0][0] + dct[0][1]; \
    int d1 = dct[1][0] + dct[1][1]; \
    int d2 = dct[0][0] - dct[0][1]; \
    int d3 = dct[1][0] - dct[1][1]; \
    int dmf = dequant_mf[i_qp%6][0][0]; \
    int qbits = i_qp/6 - 5; \
    if( qbits > 0 ) \
    { \
        dmf <<= qbits; \
        qbits = 0; \
    }
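/* The DC dequant scale is dmf = dequant_mf[i_qp%6][0][0] with a shift of
 * i_qp/6 - 5.  For i_qp < 30 the shift is negative and is applied as a right
 * shift in the functions below; for i_qp >= 30 it is folded into dmf so that
 * the remaining right shift is by zero.  E.g. at i_qp = 24, qbits = -1, so
 * each output DC is (sum * dmf) >> 1. */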

static inline void idct_dequant_2x2_dc( int16_t dct[2][2], int16_t dct4x4[4][4][4], int dequant_mf[6][4][4], int i_qp )
{
    IDCT_DEQUANT_START
    dct4x4[0][0][0] = (d0 + d1) * dmf >> -qbits;
    dct4x4[1][0][0] = (d0 - d1) * dmf >> -qbits;
    dct4x4[2][0][0] = (d2 + d3) * dmf >> -qbits;
    dct4x4[3][0][0] = (d2 - d3) * dmf >> -qbits;
}

static inline void idct_dequant_2x2_dconly( int16_t dct[2][2], int dequant_mf[6][4][4], int i_qp )
{
    IDCT_DEQUANT_START
    dct[0][0] = (d0 + d1) * dmf >> -qbits;
    dct[0][1] = (d0 - d1) * dmf >> -qbits;
    dct[1][0] = (d2 + d3) * dmf >> -qbits;
    dct[1][1] = (d2 - d3) * dmf >> -qbits;
}

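/* Forward 2x2 transform (an unnormalized Hadamard) of the four chroma 4x4 DC
 * coefficients.  The DCs are gathered from dct4x4[i][0][0] and zeroed in
 * place so that the 4x4 blocks can subsequently be coded as AC-only. */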
static inline void dct2x2dc( int16_t d[2][2], int16_t dct4x4[4][4][4] )
{
    int d0 = dct4x4[0][0][0] + dct4x4[1][0][0];
    int d1 = dct4x4[2][0][0] + dct4x4[3][0][0];
    int d2 = dct4x4[0][0][0] - dct4x4[1][0][0];
    int d3 = dct4x4[2][0][0] - dct4x4[3][0][0];
    d[0][0] = d0 + d1;
    d[1][0] = d2 + d3;
    d[0][1] = d0 - d1;
    d[1][1] = d2 - d3;
    dct4x4[0][0][0] = 0;
    dct4x4[1][0][0] = 0;
    dct4x4[2][0][0] = 0;
    dct4x4[3][0][0] = 0;
}

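/* Quantization dispatch: either RD-optimal trellis quantization (slower, but
 * it models the entropy coder's cost when choosing coefficient levels) or the
 * plain deadzone quantizer driven by the precomputed multiplier/bias tables.
 * Intra and inter blocks use separate CQM categories. */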
static ALWAYS_INLINE void x264_quant_4x4( x264_t *h, int16_t dct[4][4], int i_qp, int i_ctxBlockCat, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_4IY : CQM_4PY;
    if( h->mb.b_trellis )
        x264_quant_4x4_trellis( h, dct, i_quant_cat, i_qp, i_ctxBlockCat, b_intra, idx );
    else
        h->quantf.quant_4x4( dct, h->quant4_mf[i_quant_cat][i_qp], h->quant4_bias[i_quant_cat][i_qp] );
}

static ALWAYS_INLINE void x264_quant_8x8( x264_t *h, int16_t dct[8][8], int i_qp, int b_intra, int idx )
{
    int i_quant_cat = b_intra ? CQM_8IY : CQM_8PY;
    if( h->mb.b_trellis )
        x264_quant_8x8_trellis( h, dct, i_quant_cat, i_qp, b_intra, idx );
    else
        h->quantf.quant_8x8( dct, h->quant8_mf[i_quant_cat][i_qp], h->quant8_bias[i_quant_cat][i_qp] );
}

void x264_mb_encode_i4x4( x264_t *h, int idx, int i_qp )
{
    uint8_t *p_src = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[idx]];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[idx]];
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_4x4( h->dct.luma4x4[idx], p_src, p_dst );
        return;
    }

    h->dctf.sub4x4_dct( dct4x4, p_src, p_dst );

    x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 1, idx );

    if( array_non_zero( dct4x4 ) )
    {
        h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4 );
        h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4IY], i_qp );

        /* output samples to fdec */
        h->dctf.add4x4_idct( p_dst, dct4x4 );
    }
    else
        memset( h->dct.luma4x4[idx], 0, sizeof(h->dct.luma4x4[idx]));
}

void x264_mb_encode_i8x8( x264_t *h, int idx, int i_qp )
{
    int x = 8 * (idx&1);
    int y = 8 * (idx>>1);
    uint8_t *p_src = &h->mb.pic.p_fenc[0][x+y*FENC_STRIDE];
    uint8_t *p_dst = &h->mb.pic.p_fdec[0][x+y*FDEC_STRIDE];
    DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_8x8( h->dct.luma8x8[idx], p_src, p_dst );
        return;
    }

    h->dctf.sub8x8_dct8( dct8x8, p_src, p_dst );

    x264_quant_8x8( h, dct8x8, i_qp, 1, idx );

    h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8 );
    h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8IY], i_qp );
    h->dctf.add8x8_idct8( p_dst, dct8x8 );
}

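/* I_16x16 luma: the DC coefficient of each of the 16 4x4 blocks is pulled out
 * into a separate 4x4 array, Hadamard-transformed (dct4x4dc) and quantized on
 * its own, as H.264 requires for I_16x16 coding; the 4x4 blocks themselves
 * then carry only their 15 AC coefficients (DCT_LUMA_AC). */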
static void x264_mb_encode_i16x16( x264_t *h, int i_qp )
{
    uint8_t  *p_src = h->mb.pic.p_fenc[0];
    uint8_t  *p_dst = h->mb.pic.p_fdec[0];

    DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
    DECLARE_ALIGNED_16( int16_t dct_dc4x4[4][4] );

    int i;

    if( h->mb.b_lossless )
    {
        for( i = 0; i < 16; i++ )
        {
            int oe = block_idx_xy_fenc[i];
            int od = block_idx_xy_fdec[i];
            h->zigzagf.sub_4x4( h->dct.luma4x4[i], p_src+oe, p_dst+od );
            dct_dc4x4[0][block_idx_yx_1d[i]] = h->dct.luma4x4[i][0];
            h->dct.luma4x4[i][0] = 0;
        }
        h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );
        return;
    }

    h->dctf.sub16x16_dct( dct4x4, p_src, p_dst );
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct_dc4x4[0][block_idx_xy_1d[i]] = dct4x4[i][0][0];
        dct4x4[i][0][0] = 0;

        /* quant/scan/dequant */
        x264_quant_4x4( h, dct4x4[i], i_qp, DCT_LUMA_AC, 1, i );

        h->zigzagf.scan_4x4( h->dct.luma4x4[i], dct4x4[i] );
        h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IY], i_qp );
    }

    h->dctf.dct4x4dc( dct_dc4x4 );
    if( h->mb.b_trellis )
        x264_quant_dc_trellis( h, (int16_t*)dct_dc4x4, CQM_4IY, i_qp, DCT_LUMA_DC, 1);
    else
        h->quantf.quant_4x4_dc( dct_dc4x4, h->quant4_mf[CQM_4IY][i_qp][0]>>1, h->quant4_bias[CQM_4IY][i_qp][0]<<1 );
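    /* The quant multiplier is halved and the rounding bias doubled above,
     * presumably to compensate for the extra gain of the unnormalized DC
     * Hadamard transform relative to the AC path. */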
    h->zigzagf.scan_4x4( h->dct.luma16x16_dc, dct_dc4x4 );

    /* output samples to fdec */
    h->dctf.idct4x4dc( dct_dc4x4 );
    h->quantf.dequant_4x4_dc( dct_dc4x4, h->dequant4_mf[CQM_4IY], i_qp );  /* XXX not inverted */

    /* calculate dct coeffs */
    for( i = 0; i < 16; i++ )
    {
        /* copy dc coeff */
        dct4x4[i][0][0] = dct_dc4x4[0][block_idx_xy_1d[i]];
    }
    /* put pixels to fdec */
    h->dctf.add16x16_idct( p_dst, dct4x4 );
}

void x264_mb_encode_8x8_chroma( x264_t *h, int b_inter, int i_qp )
{
    int i, ch, nz;
    int b_decimate = b_inter && (h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate);
    h->mb.i_cbp_chroma = 0;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];
        int i_decimate_score = 0;

        DECLARE_ALIGNED_16( int16_t dct2x2[2][2]  );
        DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );

        if( h->mb.b_lossless )
        {
            for( i = 0; i < 4; i++ )
            {
                int oe = block_idx_x[i]*4 + block_idx_y[i]*4*FENC_STRIDE;
                int od = block_idx_x[i]*4 + block_idx_y[i]*4*FDEC_STRIDE;
                h->zigzagf.sub_4x4( h->dct.luma4x4[16+i+ch*4], p_src+oe, p_dst+od );
                h->dct.chroma_dc[ch][i] = h->dct.luma4x4[16+i+ch*4][0];
                h->dct.luma4x4[16+i+ch*4][0] = 0;
                nz = array_non_zero( h->dct.luma4x4[16+i+ch*4] );
                h->mb.cache.non_zero_count[x264_scan8[16+i+ch*4]] = nz;
                h->mb.i_cbp_chroma |= nz;
            }
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( h->dct.chroma_dc[ch] );
            continue;
        }

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );
        dct2x2dc( dct2x2, dct4x4 );
        /* calculate dct coeffs */
        for( i = 0; i < 4; i++ )
        {
            if( h->mb.b_trellis )
                x264_quant_4x4_trellis( h, dct4x4[i], CQM_4IC+b_inter, i_qp, DCT_CHROMA_AC, !b_inter, 0 );
            else
                h->quantf.quant_4x4( dct4x4[i], h->quant4_mf[CQM_4IC+b_inter][i_qp], h->quant4_bias[CQM_4IC+b_inter][i_qp] );
            h->zigzagf.scan_4x4( h->dct.luma4x4[16+i+ch*4], dct4x4[i] );

            if( b_decimate )
                i_decimate_score += h->quantf.decimate_score15( h->dct.luma4x4[16+i+ch*4] );
        }

        if( h->mb.b_trellis )
            x264_quant_dc_trellis( h, (int16_t*)dct2x2, CQM_4IC+b_inter, i_qp, DCT_CHROMA_DC, !b_inter );
        else
            h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4IC+b_inter][i_qp][0]>>1, h->quant4_bias[CQM_4IC+b_inter][i_qp][0]<<1 );

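        /* Decimation: if the summed importance score of all four 4x4 AC
         * blocks is tiny, coding them is judged not worth the bits, so the
         * AC coefficients are discarded and at most the DC is kept. */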
        if( b_decimate && i_decimate_score < 7 )
        {
            /* Decimate the block */
            h->mb.cache.non_zero_count[x264_scan8[16+0]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+1]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+2]+24*ch] = 0;
            h->mb.cache.non_zero_count[x264_scan8[16+3]+24*ch] = 0;
            if( !array_non_zero( dct2x2 ) ) /* Whole block is empty */
            {
                h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 0;
                continue;
            }
            /* DC-only */
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = 1;
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dconly( dct2x2, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct_dc( p_dst, dct2x2 );
        }
        else
        {
            for( i = 0; i < 4; i++ )
            {
                nz = array_non_zero( h->dct.luma4x4[16+ch*4+i] );
                h->mb.cache.non_zero_count[x264_scan8[16+ch*4+i]] = nz;
                h->mb.i_cbp_chroma |= nz;
                if( nz )
                    h->quantf.dequant_4x4( dct4x4[i], h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            }
            /* Don't optimize for the AC-only case--it's very rare */
            h->mb.cache.non_zero_count[x264_scan8[25]+ch] = array_non_zero( dct2x2 );
            zigzag_scan_2x2_dc( h->dct.chroma_dc[ch], dct2x2 );
            idct_dequant_2x2_dc( dct2x2, dct4x4, h->dequant4_mf[CQM_4IC + b_inter], i_qp );
            h->dctf.add8x8_idct( p_dst, dct4x4 );
        }
    }

    if( h->mb.i_cbp_chroma )
        h->mb.i_cbp_chroma = 2;    /* dc+ac (we can't do only ac) */
    else if( h->mb.cache.non_zero_count[x264_scan8[25]] |
             h->mb.cache.non_zero_count[x264_scan8[26]] )
        h->mb.i_cbp_chroma = 1;    /* dc only */
}

static void x264_macroblock_encode_skip( x264_t *h )
{
    h->mb.i_cbp_luma = 0x00;
    h->mb.i_cbp_chroma = 0x00;
    memset( h->mb.cache.non_zero_count, 0, X264_SCAN8_SIZE );
    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = 0;
}

/*****************************************************************************
 * x264_macroblock_encode_pskip:
 *  Encode an already marked skip block
 *****************************************************************************/
static void x264_macroblock_encode_pskip( x264_t *h )
{
    const int mvx = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][0],
                                h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy = x264_clip3( h->mb.cache.mv[0][x264_scan8[0]][1],
                                h->mb.mv_min[1], h->mb.mv_max[1] );

    /* don't do pskip motion compensation if it was already done in macroblock_analyse */
    if( !h->mb.b_skip_mc )
    {
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvx, mvy, 16, 16 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[1],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][4], h->mb.pic.i_stride[1],
                         mvx, mvy, 8, 8 );

        h->mc.mc_chroma( h->mb.pic.p_fdec[2],       FDEC_STRIDE,
                         h->mb.pic.p_fref[0][0][5], h->mb.pic.i_stride[2],
                         mvx, mvy, 8, 8 );
    }

    x264_macroblock_encode_skip( h );
}

/*****************************************************************************
 * Intra prediction for predictive lossless mode.
 *****************************************************************************/

/* Note that these functions take a shortcut (mc.copy instead of actual pixel prediction) which assumes
 * that the edge pixels of the reconstructed frame are the same as those of the source frame.  This means
 * they will only work correctly if the neighboring blocks are losslessly coded.  In practice, lossless
 * mode cannot be mixed with lossy mode within a frame. */
/* This can be resolved by explicitly copying the edge pixels after doing the mc.copy, but this doesn't
 * need to be done unless we decide to allow mixing lossless and lossy compression. */
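/* E.g. vertical prediction replicates the row of pixels directly above the
 * block; in lossless mode that row of the reconstructed frame is bit-exact
 * with the source frame, so an mc.copy from the source plane at the same
 * position yields the identical prediction. */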

void x264_predict_lossless_8x8_chroma( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[1] << h->mb.b_interlaced;
    if( i_mode == I_PRED_CHROMA_V )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-stride, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-stride, stride, 8 );
    }
    else if( i_mode == I_PRED_CHROMA_H )
    {
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[1], FDEC_STRIDE, h->mb.pic.p_fenc_plane[1]-1, stride, 8 );
        h->mc.copy[PIXEL_8x8]( h->mb.pic.p_fdec[2], FDEC_STRIDE, h->mb.pic.p_fenc_plane[2]-1, stride, 8 );
    }
    else
    {
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
        h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
    }
}

void x264_predict_lossless_4x4( x264_t *h, uint8_t *p_dst, int idx, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + block_idx_x[idx]*4 + block_idx_y[idx]*4 * stride;

    if( i_mode == I_PRED_4x4_V )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-stride, stride, 4 );
    else if( i_mode == I_PRED_4x4_H )
        h->mc.copy[PIXEL_4x4]( p_dst, FDEC_STRIDE, p_src-1, stride, 4 );
    else
        h->predict_4x4[i_mode]( p_dst );
}

void x264_predict_lossless_8x8( x264_t *h, uint8_t *p_dst, int idx, int i_mode, uint8_t edge[33] )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    uint8_t *p_src = h->mb.pic.p_fenc_plane[0] + (idx&1)*8 + (idx>>1)*8*stride;

    if( i_mode == I_PRED_8x8_V )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-stride, stride, 8 );
    else if( i_mode == I_PRED_8x8_H )
        h->mc.copy[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src-1, stride, 8 );
    else
        h->predict_8x8[i_mode]( p_dst, edge );
}

void x264_predict_lossless_16x16( x264_t *h, int i_mode )
{
    int stride = h->fenc->i_stride[0] << h->mb.b_interlaced;
    if( i_mode == I_PRED_16x16_V )
        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-stride, stride, 16 );
    else if( i_mode == I_PRED_16x16_H )
        h->mc.copy_16x16_unaligned( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.p_fenc_plane[0]-1, stride, 16 );
    else
        h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );
}

/*****************************************************************************
 * x264_macroblock_encode:
 *****************************************************************************/
void x264_macroblock_encode( x264_t *h )
{
    int i_cbp_dc = 0;
    int i_qp = h->mb.i_qp;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int b_force_no_skip = 0;
    int i,j,idx;
    uint8_t nnz8x8[4] = {1,1,1,1};

    if( h->sh.b_mbaff
        && h->mb.i_mb_xy == h->sh.i_first_mb + h->mb.i_mb_stride
        && IS_SKIP(h->mb.type[h->sh.i_first_mb]) )
    {
        /* The first skip is predicted to be a frame mb pair.
         * We don't yet support the aff part of mbaff, so force it to non-skip
         * so that we can pick the aff flag. */
        b_force_no_skip = 1;
        if( IS_SKIP(h->mb.i_type) )
        {
            if( h->mb.i_type == P_SKIP )
                h->mb.i_type = P_L0;
            else if( h->mb.i_type == B_SKIP )
                h->mb.i_type = B_DIRECT;
        }
    }

    if( h->mb.i_type == P_SKIP )
    {
        /* A bit special */
        x264_macroblock_encode_pskip( h );
        return;
    }
    if( h->mb.i_type == B_SKIP )
    {
        /* don't do bskip motion compensation if it was already done in macroblock_analyse */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );
        x264_macroblock_encode_skip( h );
        return;
    }

    if( h->mb.i_type == I_16x16 )
    {
        const int i_mode = h->mb.i_intra16x16_pred_mode;
        h->mb.b_transform_8x8 = 0;

        if( h->mb.b_lossless )
            x264_predict_lossless_16x16( h, i_mode );
        else
            h->predict_16x16[i_mode]( h->mb.pic.p_fdec[0] );

        /* encode the 16x16 macroblock */
        x264_mb_encode_i16x16( h, i_qp );
    }
    else if( h->mb.i_type == I_8x8 )
    {
        DECLARE_ALIGNED_16( uint8_t edge[33] );
        h->mb.b_transform_8x8 = 1;
        /* If we already encoded 3 of the 4 i8x8 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i8x8_fdec_buf, 16, 16 );
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma8x8, h->mb.pic.i8x8_dct_buf, sizeof(h->mb.pic.i8x8_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 3 : 0 ; i < 4; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][8 * (i&1) + 8 * (i>>1) * FDEC_STRIDE];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[4*i]];
            x264_predict_8x8_filter( p_dst, edge, h->mb.i_neighbour8[i], x264_pred_i4x4_neighbors[i_mode] );

            if( h->mb.b_lossless )
                x264_predict_lossless_8x8( h, p_dst, i, i_mode, edge );
            else
                h->predict_8x8[i_mode]( p_dst, edge );

            x264_mb_encode_i8x8( h, i, i_qp );
        }
        for( i = 0; i < 4; i++ )
            nnz8x8[i] = array_non_zero( h->dct.luma8x8[i] );
    }
    else if( h->mb.i_type == I_4x4 )
    {
        h->mb.b_transform_8x8 = 0;
        /* If we already encoded 15 of the 16 i4x4 blocks, we don't have to do them again. */
        if( h->mb.i_skip_intra )
        {
            h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fdec[0], FDEC_STRIDE, h->mb.pic.i4x4_fdec_buf, 16, 16 );
            /* In RD mode, restore the now-overwritten DCT data. */
            if( h->mb.i_skip_intra == 2 )
                h->mc.memcpy_aligned( h->dct.luma4x4, h->mb.pic.i4x4_dct_buf, sizeof(h->mb.pic.i4x4_dct_buf) );
        }
        for( i = h->mb.i_skip_intra ? 15 : 0 ; i < 16; i++ )
        {
            uint8_t  *p_dst = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i]];
            int      i_mode = h->mb.cache.intra4x4_pred_mode[x264_scan8[i]];

            if( (h->mb.i_neighbour4[i] & (MB_TOPRIGHT|MB_TOP)) == MB_TOP )
                /* emulate missing topright samples */
                *(uint32_t*) &p_dst[4-FDEC_STRIDE] = p_dst[3-FDEC_STRIDE] * 0x01010101U;

            if( h->mb.b_lossless )
                x264_predict_lossless_4x4( h, p_dst, i, i_mode );
            else
                h->predict_4x4[i_mode]( p_dst );
            x264_mb_encode_i4x4( h, i, i_qp );
        }
    }
    else    /* Inter MB */
    {
        int i8x8, i4x4;
        int i_decimate_mb = 0;

        /* Don't repeat motion compensation if it was already done in non-RD transform analysis */
        if( !h->mb.b_skip_mc )
            x264_mb_mc( h );

        if( h->mb.b_lossless )
        {
            if( h->mb.b_transform_8x8 )
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                {
                    int x = 8*(i8x8&1);
                    int y = 8*(i8x8>>1);
                    h->zigzagf.sub_8x8( h->dct.luma8x8[i8x8],
                                        h->mb.pic.p_fenc[0]+x+y*FENC_STRIDE,
                                        h->mb.pic.p_fdec[0]+x+y*FDEC_STRIDE );
                    nnz8x8[i8x8] = array_non_zero( h->dct.luma8x8[i8x8] );
                }
            else
                for( i4x4 = 0; i4x4 < 16; i4x4++ )
                {
                    h->zigzagf.sub_4x4( h->dct.luma4x4[i4x4],
                                        h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4x4],
                                        h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4x4] );
                }
        }
        else if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[4][8][8] );
            b_decimate &= !h->mb.b_trellis; // 8x8 trellis is inherently optimal decimation
            h->dctf.sub16x16_dct8( dct8x8, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[1] += h->mb.b_noise_reduction * 4;

            for( idx = 0; idx < 4; idx++ )
            {
                if( h->mb.b_noise_reduction )
                    h->quantf.denoise_dct( *dct8x8[idx], h->nr_residual_sum[1], h->nr_offset[1], 64 );
                x264_quant_8x8( h, dct8x8[idx], i_qp, 0, idx );

                h->zigzagf.scan_8x8( h->dct.luma8x8[idx], dct8x8[idx] );

                if( b_decimate )
                {
                    int i_decimate_8x8 = h->quantf.decimate_score64( h->dct.luma8x8[idx] );
                    i_decimate_mb += i_decimate_8x8;
                    if( i_decimate_8x8 < 4 )
                        nnz8x8[idx] = 0;
                }
                else
                    nnz8x8[idx] = array_non_zero( dct8x8[idx] );
            }

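            /* MB-level decimation: even if individual 8x8 blocks survived the
             * per-block threshold, a total score below 6 means the whole
             * residual is judged too cheap to be worth coding, so all four
             * blocks are zeroed at once. */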
            if( i_decimate_mb < 6 && b_decimate )
                *(uint32_t*)nnz8x8 = 0;
            else
            {
                for( idx = 0; idx < 4; idx++ )
                    if( nnz8x8[idx] )
                    {
                        h->quantf.dequant_8x8( dct8x8[idx], h->dequant8_mf[CQM_8PY], i_qp );
                        h->dctf.add8x8_idct8( &h->mb.pic.p_fdec[0][(idx&1)*8 + (idx>>1)*8*FDEC_STRIDE], dct8x8[idx] );
                    }
            }
        }
        else
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[16][4][4] );
            h->dctf.sub16x16_dct( dct4x4, h->mb.pic.p_fenc[0], h->mb.pic.p_fdec[0] );
            h->nr_count[0] += h->mb.b_noise_reduction * 16;

            for( i8x8 = 0; i8x8 < 4; i8x8++ )
            {
                int i_decimate_8x8;

                /* encode one 4x4 block */
                i_decimate_8x8 = 0;
                for( i4x4 = 0; i4x4 < 4; i4x4++ )
                {
                    idx = i8x8 * 4 + i4x4;

                    if( h->mb.b_noise_reduction )
                        h->quantf.denoise_dct( *dct4x4[idx], h->nr_residual_sum[0], h->nr_offset[0], 16 );
                    x264_quant_4x4( h, dct4x4[idx], i_qp, DCT_LUMA_4x4, 0, idx );

                    h->zigzagf.scan_4x4( h->dct.luma4x4[idx], dct4x4[idx] );

                    if( b_decimate && i_decimate_8x8 < 6 )
                        i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[idx] );
                }

                /* decimate this 8x8 block */
                i_decimate_mb += i_decimate_8x8;
                if( i_decimate_8x8 < 4 && b_decimate )
                    nnz8x8[i8x8] = 0;
            }

            if( i_decimate_mb < 6 && b_decimate )
                *(uint32_t*)nnz8x8 = 0;
            else
            {
                for( i8x8 = 0; i8x8 < 4; i8x8++ )
                    if( nnz8x8[i8x8] )
                    {
                        for( i = 0; i < 4; i++ )
                            h->quantf.dequant_4x4( dct4x4[i8x8*4+i], h->dequant4_mf[CQM_4PY], i_qp );
                        h->dctf.add8x8_idct( &h->mb.pic.p_fdec[0][(i8x8&1)*8 + (i8x8>>1)*8*FDEC_STRIDE], &dct4x4[i8x8*4] );
                    }
            }
        }
    }

    /* encode chroma */
    if( IS_INTRA( h->mb.i_type ) )
    {
        const int i_mode = h->mb.i_chroma_pred_mode;
        if( h->mb.b_lossless )
            x264_predict_lossless_8x8_chroma( h, i_mode );
        else
        {
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[1] );
            h->predict_8x8c[i_mode]( h->mb.pic.p_fdec[2] );
        }
    }

    /* encode the 8x8 blocks */
    x264_mb_encode_8x8_chroma( h, !IS_INTRA( h->mb.i_type ), h->mb.i_chroma_qp );

    /* coded block pattern and non_zero_count */
    h->mb.i_cbp_luma = 0x00;
    if( h->mb.i_type == I_16x16 )
    {
        for( i = 0; i < 16; i++ )
        {
            int nz = array_non_zero( h->dct.luma4x4[i] );
            h->mb.cache.non_zero_count[x264_scan8[i]] = nz;
            h->mb.i_cbp_luma |= nz;
        }
        h->mb.i_cbp_luma *= 0xf;
        h->mb.cache.non_zero_count[x264_scan8[24]] = array_non_zero( h->dct.luma16x16_dc );
    }
    else
    {
        for( i = 0; i < 4; i++)
        {
            if(!nnz8x8[i])
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[0+i*4]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[2+i*4]] = 0;
            }
            else if( h->mb.b_transform_8x8 )
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[0+4*i]] = nnz8x8[i] * 0x0101;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[2+4*i]] = nnz8x8[i] * 0x0101;
                h->mb.i_cbp_luma |= nnz8x8[i] << i;
            }
            else
            {
                int nz, cbp = 0;
                for( j = 0; j < 4; j++ )
                {
                    nz = array_non_zero( h->dct.luma4x4[j+4*i] );
                    h->mb.cache.non_zero_count[x264_scan8[j+4*i]] = nz;
                    cbp |= nz;
                }
                h->mb.i_cbp_luma |= cbp << i;
            }
        }
        h->mb.cache.non_zero_count[x264_scan8[24]] = 0;
    }

    if( h->param.b_cabac )
    {
        i_cbp_dc = h->mb.cache.non_zero_count[x264_scan8[24]]
                 | h->mb.cache.non_zero_count[x264_scan8[25]] << 1
                 | h->mb.cache.non_zero_count[x264_scan8[26]] << 2;
    }

    /* store cbp */
    h->mb.cbp[h->mb.i_mb_xy] = (i_cbp_dc << 8) | (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma;

    /* Check for P_SKIP
     * XXX: in the ME, perhaps we should take x264_mb_predict_mv_pskip into account
     *      (if multiple mvs give the same result) */
    if( !b_force_no_skip )
    {
        if( h->mb.i_type == P_L0 && h->mb.i_partition == D_16x16 &&
            !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) &&
            *(uint32_t*)h->mb.cache.mv[0][x264_scan8[0]] == *(uint32_t*)h->mb.cache.pskip_mv
            && h->mb.cache.ref[0][x264_scan8[0]] == 0 )
        {
            h->mb.i_type = P_SKIP;
        }

        /* Check for B_SKIP */
        if( h->mb.i_type == B_DIRECT && !(h->mb.i_cbp_luma | h->mb.i_cbp_chroma) )
        {
            h->mb.i_type = B_SKIP;
        }
    }
}

/*****************************************************************************
 * x264_macroblock_probe_skip:
 *  Check if the current MB could be encoded as a [PB]_SKIP
 *  (this assumes the previous QP is used)
 *****************************************************************************/
int x264_macroblock_probe_skip( x264_t *h, int b_bidir )
{
    DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
    DECLARE_ALIGNED_16( int16_t dct2x2[2][2] );
    DECLARE_ALIGNED_16( int16_t dctscan[16] );

    int i_qp = h->mb.i_qp;
    int mvp[2];
    int ch, thresh;

    int i8x8, i4x4;
    int i_decimate_mb;

    if( !b_bidir )
    {
        /* Get the MV */
        mvp[0] = x264_clip3( h->mb.cache.pskip_mv[0], h->mb.mv_min[0], h->mb.mv_max[0] );
        mvp[1] = x264_clip3( h->mb.cache.pskip_mv[1], h->mb.mv_min[1], h->mb.mv_max[1] );

        /* Motion compensation */
        h->mc.mc_luma( h->mb.pic.p_fdec[0],    FDEC_STRIDE,
                       h->mb.pic.p_fref[0][0], h->mb.pic.i_stride[0],
                       mvp[0], mvp[1], 16, 16 );
    }

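    /* The same decimation scores as in x264_macroblock_encode are reused
     * here, but as an early out: as soon as the running score indicates the
     * residual would survive decimation, the MB cannot be a skip. */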
    for( i8x8 = 0, i_decimate_mb = 0; i8x8 < 4; i8x8++ )
    {
        int fenc_offset = (i8x8&1) * 8 + (i8x8>>1) * FENC_STRIDE * 8;
        int fdec_offset = (i8x8&1) * 8 + (i8x8>>1) * FDEC_STRIDE * 8;
        /* get luma diff */
        h->dctf.sub8x8_dct( dct4x4, h->mb.pic.p_fenc[0] + fenc_offset,
                                    h->mb.pic.p_fdec[0] + fdec_offset );
        /* encode one 4x4 block */
        for( i4x4 = 0; i4x4 < 4; i4x4++ )
        {
            h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PY][i_qp], h->quant4_bias[CQM_4PY][i_qp] );
            if( !array_non_zero(dct4x4[i4x4]) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score16( dctscan );
            if( i_decimate_mb >= 6 )
                return 0;
        }
    }

    /* encode chroma */
    i_qp = h->mb.i_chroma_qp;
    thresh = (x264_lambda2_tab[i_qp] + 32) >> 6;

    for( ch = 0; ch < 2; ch++ )
    {
        uint8_t  *p_src = h->mb.pic.p_fenc[1+ch];
        uint8_t  *p_dst = h->mb.pic.p_fdec[1+ch];

        if( !b_bidir )
        {
            h->mc.mc_chroma( h->mb.pic.p_fdec[1+ch],       FDEC_STRIDE,
                             h->mb.pic.p_fref[0][0][4+ch], h->mb.pic.i_stride[1+ch],
                             mvp[0], mvp[1], 8, 8 );
        }

        /* There is almost never a termination during chroma, but we can't avoid the check entirely,
         * so instead we measure the SSD and skip the full coefficient check when the score is low enough. */
        if( h->pixf.ssd[PIXEL_8x8]( p_dst, FDEC_STRIDE, p_src, FENC_STRIDE ) < thresh )
            continue;

        h->dctf.sub8x8_dct( dct4x4, p_src, p_dst );

        /* calculate dct DC */
        dct2x2dc( dct2x2, dct4x4 );
        h->quantf.quant_2x2_dc( dct2x2, h->quant4_mf[CQM_4PC][i_qp][0]>>1, h->quant4_bias[CQM_4PC][i_qp][0]<<1 );
        if( array_non_zero(dct2x2) )
            return 0;

        /* calculate dct coeffs */
        for( i4x4 = 0, i_decimate_mb = 0; i4x4 < 4; i4x4++ )
        {
            h->quantf.quant_4x4( dct4x4[i4x4], h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );
            if( !array_non_zero(dct4x4[i4x4]) )
                continue;
            h->zigzagf.scan_4x4( dctscan, dct4x4[i4x4] );
            i_decimate_mb += h->quantf.decimate_score15( dctscan );
            if( i_decimate_mb >= 7 )
                return 0;
        }
    }

    h->mb.b_skip_mc = 1;
    return 1;
}

/****************************************************************************
 * DCT-domain noise reduction / adaptive deadzone
 * from libavcodec
 ****************************************************************************/

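/* The per-coefficient offset approximates
 *     offset[i] = strength * count / (residual_sum[i] * weight[i]/256),
 * i.e. coefficients that are on average small relative to the configured
 * noise strength get a large subtractive offset (a wider deadzone), while
 * consistently large coefficients are left nearly untouched.  The +1 in the
 * divisor and the residual_sum/2 term provide div-by-zero safety and
 * rounding. */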
void x264_noise_reduction_update( x264_t *h )
{
    int cat, i;
    for( cat = 0; cat < 2; cat++ )
    {
        int size = cat ? 64 : 16;
        const uint16_t *weight = cat ? x264_dct8_weight2_tab : x264_dct4_weight2_tab;

        if( h->nr_count[cat] > (cat ? (1<<16) : (1<<18)) )
        {
            for( i = 0; i < size; i++ )
                h->nr_residual_sum[cat][i] >>= 1;
            h->nr_count[cat] >>= 1;
        }

        for( i = 0; i < size; i++ )
            h->nr_offset[cat][i] =
                ((uint64_t)h->param.analyse.i_noise_reduction * h->nr_count[cat]
                 + h->nr_residual_sum[cat][i]/2)
              / ((uint64_t)h->nr_residual_sum[cat][i] * weight[i]/256 + 1);
    }
}

/*****************************************************************************
 * RD only; 4 calls to this do not make up for one macroblock_encode.
 * Doesn't transform the chroma DC.
 *****************************************************************************/
void x264_macroblock_encode_p8x8( x264_t *h, int i8 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = h->mb.pic.p_fenc[0] + (i8&1)*8 + (i8>>1)*8*FENC_STRIDE;
    uint8_t *p_fdec = h->mb.pic.p_fdec[0] + (i8&1)*8 + (i8>>1)*8*FDEC_STRIDE;
    int b_decimate = h->sh.i_type == SLICE_TYPE_B || h->param.analyse.b_dct_decimate;
    int nnz8x8 = 0;
    int ch;

    x264_mb_mc_8x8( h, i8 );

    if( h->mb.b_lossless )
    {
        int i4;
        if( h->mb.b_transform_8x8 )
        {
            h->zigzagf.sub_8x8( h->dct.luma8x8[i8], p_fenc, p_fdec );
            nnz8x8 = array_non_zero( h->dct.luma8x8[i8] );
            *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0x0101 * nnz8x8;
            *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0x0101 * nnz8x8;
        }
        else
        {
            for( i4 = i8*4; i4 < i8*4+4; i4++ )
            {
                int nz;
                h->zigzagf.sub_4x4( h->dct.luma4x4[i4],
                                    h->mb.pic.p_fenc[0]+block_idx_xy_fenc[i4],
                                    h->mb.pic.p_fdec[0]+block_idx_xy_fdec[i4] );
                nz = array_non_zero( h->dct.luma4x4[i4] );
                h->mb.cache.non_zero_count[x264_scan8[i4]] = nz;
                nnz8x8 |= nz;
            }
        }
        for( ch = 0; ch < 2; ch++ )
        {
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;
            h->zigzagf.sub_4x4( h->dct.luma4x4[16+i8+ch*4], p_fenc, p_fdec );
            h->dct.luma4x4[16+i8+ch*4][0] = 0;
        }
        h->mb.cache.non_zero_count[x264_scan8[16+i8]] = array_non_zero( h->dct.luma4x4[16+i8] );
        h->mb.cache.non_zero_count[x264_scan8[20+i8]] = array_non_zero( h->dct.luma4x4[20+i8] );
    }
    else
    {
        if( h->mb.b_transform_8x8 )
        {
            DECLARE_ALIGNED_16( int16_t dct8x8[8][8] );
            h->dctf.sub8x8_dct8( dct8x8, p_fenc, p_fdec );
            x264_quant_8x8( h, dct8x8, i_qp, 0, i8 );
            h->zigzagf.scan_8x8( h->dct.luma8x8[i8], dct8x8 );

            if( b_decimate && !h->mb.b_trellis )
                nnz8x8 = 4 <= h->quantf.decimate_score64( h->dct.luma8x8[i8] );
            else
                nnz8x8 = array_non_zero( dct8x8 );

            if( nnz8x8 )
            {
                h->quantf.dequant_8x8( dct8x8, h->dequant8_mf[CQM_8PY], i_qp );
                h->dctf.add8x8_idct8( p_fdec, dct8x8 );
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0x0101;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0x0101;
            }
            else
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0;
            }
        }
        else
        {
            int i4;
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4][4] );
            h->dctf.sub8x8_dct( dct4x4, p_fenc, p_fdec );
            for( i4 = 0; i4 < 4; i4++ )
                x264_quant_4x4( h, dct4x4[i4], i_qp, DCT_LUMA_4x4, 0, i8*4+i4 );

            for( i4 = 0; i4 < 4; i4++ )
                h->zigzagf.scan_4x4( h->dct.luma4x4[i8*4+i4], dct4x4[i4] );

            if( b_decimate )
            {
                int i_decimate_8x8 = 0;
                for( i4 = 0; i4 < 4 && i_decimate_8x8 < 4; i4++ )
                    i_decimate_8x8 += h->quantf.decimate_score16( h->dct.luma4x4[i8*4+i4] );
                nnz8x8 = 4 <= i_decimate_8x8;
            }
            else
                nnz8x8 = array_non_zero( dct4x4 );

            if( nnz8x8 )
            {
                for( i4 = 0; i4 < 4; i4++ )
                {
                    if( array_non_zero( dct4x4[i4] ) )
                    {
                        h->quantf.dequant_4x4( dct4x4[i4], h->dequant4_mf[CQM_4PY], i_qp );
                        h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = 1;
                    }
                    else
                        h->mb.cache.non_zero_count[x264_scan8[i8*4+i4]] = 0;
                }
                h->dctf.add8x8_idct( p_fdec, dct4x4 );
            }
            else
            {
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+0]] = 0;
                *(uint16_t*)&h->mb.cache.non_zero_count[x264_scan8[i8*4+2]] = 0;
            }
        }

        i_qp = h->mb.i_chroma_qp;

        for( ch = 0; ch < 2; ch++ )
        {
            DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
            p_fenc = h->mb.pic.p_fenc[1+ch] + (i8&1)*4 + (i8>>1)*4*FENC_STRIDE;
            p_fdec = h->mb.pic.p_fdec[1+ch] + (i8&1)*4 + (i8>>1)*4*FDEC_STRIDE;

            h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
            dct4x4[0][0] = 0;

            if( h->mb.b_trellis )
                x264_quant_4x4_trellis( h, dct4x4, CQM_4PC, i_qp, DCT_CHROMA_AC, 0, 0 );
            else
                h->quantf.quant_4x4( dct4x4, h->quant4_mf[CQM_4PC][i_qp], h->quant4_bias[CQM_4PC][i_qp] );

            if( array_non_zero( dct4x4 ) )
            {
                h->zigzagf.scan_4x4( h->dct.luma4x4[16+i8+ch*4], dct4x4 );
                h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PC], i_qp );
                h->dctf.add4x4_idct( p_fdec, dct4x4 );
                h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = 1;
            }
            else
                h->mb.cache.non_zero_count[x264_scan8[16+i8+ch*4]] = 0;
        }
    }
    h->mb.i_cbp_luma &= ~(1 << i8);
    h->mb.i_cbp_luma |= nnz8x8 << i8;
    h->mb.i_cbp_chroma = 0x02;
}

/*****************************************************************************
 * RD only, luma only
 *****************************************************************************/
void x264_macroblock_encode_p4x4( x264_t *h, int i4 )
{
    int i_qp = h->mb.i_qp;
    uint8_t *p_fenc = &h->mb.pic.p_fenc[0][block_idx_xy_fenc[i4]];
    uint8_t *p_fdec = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    const int i_ref = h->mb.cache.ref[0][x264_scan8[i4]];
    const int mvx   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][0], h->mb.mv_min[0], h->mb.mv_max[0] );
    const int mvy   = x264_clip3( h->mb.cache.mv[0][x264_scan8[i4]][1], h->mb.mv_min[1], h->mb.mv_max[1] );

    h->mc.mc_luma( p_fdec, FDEC_STRIDE, h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0], mvx + 4*4*block_idx_x[i4], mvy + 4*4*block_idx_y[i4], 4, 4 );

    if( h->mb.b_lossless )
    {
        h->zigzagf.sub_4x4( h->dct.luma4x4[i4], p_fenc, p_fdec );
        h->mb.cache.non_zero_count[x264_scan8[i4]] = array_non_zero( h->dct.luma4x4[i4] );
    }
    else
    {
        DECLARE_ALIGNED_16( int16_t dct4x4[4][4] );
        h->dctf.sub4x4_dct( dct4x4, p_fenc, p_fdec );
        x264_quant_4x4( h, dct4x4, i_qp, DCT_LUMA_4x4, 0, i4 );
        if( array_non_zero( dct4x4 ) )
        {
            h->zigzagf.scan_4x4( h->dct.luma4x4[i4], dct4x4 );
            h->quantf.dequant_4x4( dct4x4, h->dequant4_mf[CQM_4PY], i_qp );
            h->dctf.add4x4_idct( p_fdec, dct4x4 );
            h->mb.cache.non_zero_count[x264_scan8[i4]] = 1;
        }
        else
            h->mb.cache.non_zero_count[x264_scan8[i4]] = 0;
    }
}