/*****************************************************************************
 * macroblock.c: macroblock common functions
 *****************************************************************************
 * Copyright (C) 2003-2011 x264 project
 *
 * Authors: Fiona Glaser <fiona@x264.com>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
#include "common.h"
#include "encoder/me.h"
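
/* Motion-compensate one luma (or, in 4:4:4, one full plane) partition of the
 * current macroblock into the reconstruction buffer. x, y, width and height are
 * in 4x4-block units; only list-0 predictions go through the per-reference
 * weighting table, list 1 is always unweighted. */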
#define MC_LUMA(list,p) \
    h->mc.mc_luma( &h->mb.pic.p_fdec[p][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, \
                   &h->mb.pic.p_fref[list][i_ref][p*4], h->mb.pic.i_stride[p], \
                   mvx, mvy, 4*width, 4*height, \
                   list ? x264_weight_none : &h->sh.weight[i_ref][p] );
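
/* List-0-only motion compensation for one partition. The cached MV is clipped
 * to the legal range, then the "+ 4*4*x" / "+ 4*4*y" terms convert it to an
 * absolute quarter-pel position (4 pels per block, 4 quarter-pels per pel);
 * chroma is interpolated at half the size, with weighted prediction applied
 * when the slice header defines weights for this reference. */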
static NOINLINE void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref = h->mb.cache.ref[0][i8];
    int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;

    // chroma is offset if MCing from a field of opposite parity
    if( MB_INTERLACED & i_ref )
        mvy += (h->mb.i_mb_y & 1)*4 - 2;

    h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x],
                     &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
                     h->mb.pic.p_fref[0][i_ref][4], h->mb.pic.i_stride[1],
                     mvx, mvy, 2*width, 2*height );

    if( h->sh.weight[i_ref][1].weightfn )
        h->sh.weight[i_ref][1].weightfn[width>>1]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
                                                   &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
                                                   &h->sh.weight[i_ref][1], height*2 );
    if( h->sh.weight[i_ref][2].weightfn )
        h->sh.weight[i_ref][2].weightfn[width>>1]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
                                                   &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
                                                   &h->sh.weight[i_ref][2], height*2 );
static NOINLINE void x264_mb_mc_1xywh( x264_t *h, int x, int y, int width, int height )
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref = h->mb.cache.ref[1][i8];
    int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;

    if( MB_INTERLACED & i_ref )
        mvy += (h->mb.i_mb_y & 1)*4 - 2;

    h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x],
                     &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
                     h->mb.pic.p_fref[1][i_ref][4], h->mb.pic.i_stride[1],
                     mvx, mvy, 2*width, 2*height );

#define MC_LUMA_BI(p) \
    src0 = h->mc.get_ref( tmp0, &i_stride0, &h->mb.pic.p_fref[0][i_ref0][p*4], h->mb.pic.i_stride[p], \
                          mvx0, mvy0, 4*width, 4*height, x264_weight_none ); \
    src1 = h->mc.get_ref( tmp1, &i_stride1, &h->mb.pic.p_fref[1][i_ref1][p*4], h->mb.pic.i_stride[p], \
                          mvx1, mvy1, 4*width, 4*height, x264_weight_none ); \
    h->mc.avg[i_mode]( &h->mb.pic.p_fdec[p][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE, \
                       src0, i_stride0, src1, i_stride1, weight );
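
/* Bi-predictive motion compensation: both references are fetched unweighted via
 * get_ref() into temporary buffers and then blended into the reconstruction with
 * the bipred weight precomputed for this pair of reference indices. */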
static NOINLINE void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
    int i8 = x264_scan8[0]+x+8*y;
    int i_ref0 = h->mb.cache.ref[0][i8];
    int i_ref1 = h->mb.cache.ref[1][i8];
    int weight = h->mb.bipred_weight[i_ref0][i_ref1];
    int mvx0 = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
    int mvy0 = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
    int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
    int i_mode = x264_size2pixel[height][width];
    int i_stride0 = 16, i_stride1 = 16;
    ALIGNED_ARRAY_16( pixel, tmp0,[16*16] );
    ALIGNED_ARRAY_16( pixel, tmp1,[16*16] );

    if( MB_INTERLACED & i_ref0 )
        mvy0 += (h->mb.i_mb_y & 1)*4 - 2;
    if( MB_INTERLACED & i_ref1 )
        mvy1 += (h->mb.i_mb_y & 1)*4 - 2;

    h->mc.mc_chroma( tmp0, tmp0+8, 16, h->mb.pic.p_fref[0][i_ref0][4], h->mb.pic.i_stride[1],
                     mvx0, mvy0, 2*width, 2*height );
    h->mc.mc_chroma( tmp1, tmp1+8, 16, h->mb.pic.p_fref[1][i_ref1][4], h->mb.pic.i_stride[1],
                     mvx1, mvy1, 2*width, 2*height );
    h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp0, 16, tmp1, 16, weight );
    h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp0+8, 16, tmp1+8, 16, weight );
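
/* Motion compensation for one 8x8 block: P slices dispatch on the 8x8
 * sub-partition type, B slices choose uni- or bi-prediction from the cached
 * reference indices (a negative index means the list is unused for this block). */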
void x264_mb_mc_8x8( x264_t *h, int i8 )
    if( h->sh.i_type == SLICE_TYPE_P )
        switch( h->mb.i_sub_partition[i8] )
                x264_mb_mc_0xywh( h, x, y, 2, 2 );
                x264_mb_mc_0xywh( h, x, y+0, 2, 1 );
                x264_mb_mc_0xywh( h, x, y+1, 2, 1 );
                x264_mb_mc_0xywh( h, x+0, y, 1, 2 );
                x264_mb_mc_0xywh( h, x+1, y, 1, 2 );
                x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
                x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
                x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
                x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
        int scan8 = x264_scan8[0] + x + 8*y;

        if( h->mb.cache.ref[0][scan8] >= 0 )
            if( h->mb.cache.ref[1][scan8] >= 0 )
                x264_mb_mc_01xywh( h, x, y, 2, 2 );
                x264_mb_mc_0xywh( h, x, y, 2, 2 );
            x264_mb_mc_1xywh( h, x, y, 2, 2 );

void x264_mb_mc( x264_t *h )
    if( h->mb.i_partition == D_8x8 )
        for( int i = 0; i < 4; i++ )
            x264_mb_mc_8x8( h, i );
        int ref0a = h->mb.cache.ref[0][x264_scan8[ 0]];
        int ref0b = h->mb.cache.ref[0][x264_scan8[12]];
        int ref1a = h->mb.cache.ref[1][x264_scan8[ 0]];
        int ref1b = h->mb.cache.ref[1][x264_scan8[12]];

        if( h->mb.i_partition == D_16x16 )
                if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 4, 4 );
                else             x264_mb_mc_0xywh ( h, 0, 0, 4, 4 );
            else                 x264_mb_mc_1xywh ( h, 0, 0, 4, 4 );
        else if( h->mb.i_partition == D_16x8 )
                if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 4, 2 );
                else             x264_mb_mc_0xywh ( h, 0, 0, 4, 2 );
            else                 x264_mb_mc_1xywh ( h, 0, 0, 4, 2 );
                if( ref1b >= 0 ) x264_mb_mc_01xywh( h, 0, 2, 4, 2 );
                else             x264_mb_mc_0xywh ( h, 0, 2, 4, 2 );
            else                 x264_mb_mc_1xywh ( h, 0, 2, 4, 2 );
        else if( h->mb.i_partition == D_8x16 )
                if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 2, 4 );
                else             x264_mb_mc_0xywh ( h, 0, 0, 2, 4 );
            else                 x264_mb_mc_1xywh ( h, 0, 0, 2, 4 );
                if( ref1b >= 0 ) x264_mb_mc_01xywh( h, 2, 0, 2, 4 );
                else             x264_mb_mc_0xywh ( h, 2, 0, 2, 4 );
            else                 x264_mb_mc_1xywh ( h, 2, 0, 2, 4 );

int x264_macroblock_cache_allocate( x264_t *h )
    int i_mb_count = h->mb.i_mb_count;
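    /* One macroblock covers a 2x2 grid of 8x8 blocks and a 4x4 grid of 4x4 blocks,
     * hence the doubled and quadrupled strides below. */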
    h->mb.i_mb_stride = h->mb.i_mb_width;
    h->mb.i_b8_stride = h->mb.i_mb_width * 2;
    h->mb.i_b4_stride = h->mb.i_mb_width * 4;

    h->mb.b_interlaced = PARAM_INTERLACED;

    CHECKED_MALLOC( h->mb.qp, i_mb_count * sizeof(int8_t) );
    CHECKED_MALLOC( h->mb.cbp, i_mb_count * sizeof(int16_t) );
    CHECKED_MALLOC( h->mb.skipbp, i_mb_count * sizeof(int8_t) );
    CHECKED_MALLOC( h->mb.mb_transform_size, i_mb_count * sizeof(int8_t) );
    CHECKED_MALLOC( h->mb.slice_table, i_mb_count * sizeof(uint16_t) );
    memset( h->mb.slice_table, -1, i_mb_count * sizeof(uint16_t) );

    /* 0 -> 3 top(4), 4 -> 6 : left(3) */
    CHECKED_MALLOC( h->mb.intra4x4_pred_mode, i_mb_count * 8 * sizeof(int8_t) );

    CHECKED_MALLOC( h->mb.non_zero_count, i_mb_count * 48 * sizeof(uint8_t) );

    if( h->param.b_cabac )
        CHECKED_MALLOC( h->mb.chroma_pred_mode, i_mb_count * sizeof(int8_t) );
        CHECKED_MALLOC( h->mb.mvd[0], i_mb_count * sizeof( **h->mb.mvd ) );
        CHECKED_MALLOC( h->mb.mvd[1], i_mb_count * sizeof( **h->mb.mvd ) );

    for( int i = 0; i < 2; i++ )
        int i_refs = X264_MIN(X264_REF_MAX, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << PARAM_INTERLACED;
        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
            i_refs = X264_MIN(X264_REF_MAX, i_refs + 1 + (BIT_DEPTH == 8)); //smart weights add two duplicate frames, one in >8-bit

        for( int j = !i; j < i_refs; j++ )
            CHECKED_MALLOC( h->mb.mvr[i][j], 2 * (i_mb_count + 1) * sizeof(int16_t) );
            M32( h->mb.mvr[i][j][0] ) = 0;
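            /* The allocation above reserves space for one extra motion vector and
             * zeroes the first entry, presumably as a sentinel: note that
             * x264_macroblock_cache_free() releases these pointers at offset -1,
             * so the stored pointer evidently ends up advanced past that first MV. */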

    if( h->param.analyse.i_weighted_pred )
        int i_padv = PADV << PARAM_INTERLACED;
        int luma_plane_size = 0;

        if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE )
            // only need buffer for lookahead
            if( !h->param.i_sync_lookahead || h == h->thread[h->param.i_threads] )
                // Fake analysis only works on lowres
                luma_plane_size = h->fdec->i_stride_lowres * (h->mb.i_mb_height*8+2*i_padv);
                // Only need 1 buffer for analysis

            luma_plane_size = h->fdec->i_stride[0] * (h->mb.i_mb_height*16+2*i_padv);

            if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
                //smart can weight one ref and one offset -1 in 8-bit
                numweightbuf = 1 + (BIT_DEPTH == 8);
                //simple only has one weighted ref

        for( int i = 0; i < numweightbuf; i++ )
            CHECKED_MALLOC( h->mb.p_weight_buf[i], luma_plane_size * sizeof(pixel) );

void x264_macroblock_cache_free( x264_t *h )
    for( int i = 0; i < 2; i++ )
        for( int j = !i; j < X264_REF_MAX*2; j++ )
            if( h->mb.mvr[i][j] )
                x264_free( h->mb.mvr[i][j]-1 );
    for( int i = 0; i < X264_REF_MAX; i++ )
        x264_free( h->mb.p_weight_buf[i] );

    if( h->param.b_cabac )
        x264_free( h->mb.chroma_pred_mode );
        x264_free( h->mb.mvd[0] );
        x264_free( h->mb.mvd[1] );

    x264_free( h->mb.slice_table );
    x264_free( h->mb.intra4x4_pred_mode );
    x264_free( h->mb.non_zero_count );
    x264_free( h->mb.mb_transform_size );
    x264_free( h->mb.skipbp );
    x264_free( h->mb.cbp );
    x264_free( h->mb.qp );

int x264_macroblock_thread_allocate( x264_t *h, int b_lookahead )
    for( int i = 0; i <= 4*PARAM_INTERLACED; i++ )
        for( int j = 0; j < (CHROMA444 ? 3 : 2); j++ )
            /* shouldn't really be initialized, just silences a valgrind false-positive in predict_8x8_filter_mmx */
            CHECKED_MALLOCZERO( h->intra_border_backup[i][j], (h->sps->i_mb_width*16+32) * sizeof(pixel) );
            h->intra_border_backup[i][j] += 16;
            if( !PARAM_INTERLACED )
                h->intra_border_backup[1][j] = h->intra_border_backup[i][j];
    for( int i = 0; i <= PARAM_INTERLACED; i++ )
        CHECKED_MALLOC( h->deblock_strength[i], sizeof(**h->deblock_strength) * h->mb.i_mb_width );
        h->deblock_strength[1] = h->deblock_strength[i];

    /* Allocate scratch buffer */
    int scratch_size = 0;
        int buf_hpel = (h->thread[0]->fdec->i_width[0]+48) * sizeof(int16_t);
        int buf_ssim = h->param.analyse.b_ssim * 8 * (h->param.i_width/4+3) * sizeof(int);
        int me_range = X264_MIN(h->param.analyse.i_me_range, h->param.analyse.i_mv_range);
        int buf_tesa = (h->param.analyse.i_me_method >= X264_ME_ESA) *
                       ((me_range*2+24) * sizeof(int16_t) + (me_range+4) * (me_range+1) * 4 * sizeof(mvsad_t));
        scratch_size = X264_MAX3( buf_hpel, buf_ssim, buf_tesa );

        int buf_mbtree = h->param.rc.b_mb_tree * ((h->mb.i_mb_width+7)&~7) * sizeof(int);
        scratch_size = X264_MAX( scratch_size, buf_mbtree );
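        /* A single scratch buffer is shared by the half-pel filter, SSIM
         * computation, exhaustive motion search and mb-tree, so it only needs to
         * be as large as the biggest of those requirements (presumably because
         * only one of them is in use at any given time). */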
        CHECKED_MALLOC( h->scratch_buffer, scratch_size );
        h->scratch_buffer = NULL;

void x264_macroblock_thread_free( x264_t *h, int b_lookahead )
    for( int i = 0; i <= PARAM_INTERLACED; i++ )
        x264_free( h->deblock_strength[i] );
    for( int i = 0; i <= 4*PARAM_INTERLACED; i++ )
        for( int j = 0; j < (CHROMA444 ? 3 : 2); j++ )
            x264_free( h->intra_border_backup[i][j] - 16 );

    x264_free( h->scratch_buffer );

void x264_macroblock_slice_init( x264_t *h )
    h->mb.mv[0] = h->fdec->mv[0];
    h->mb.mv[1] = h->fdec->mv[1];
    h->mb.mvr[0][0] = h->fdec->mv16x16;
    h->mb.ref[0] = h->fdec->ref[0];
    h->mb.ref[1] = h->fdec->ref[1];
    h->mb.type = h->fdec->mb_type;
    h->mb.partition = h->fdec->mb_partition;
    h->mb.field = h->fdec->field;

    h->fdec->i_ref[0] = h->i_ref[0];
    h->fdec->i_ref[1] = h->i_ref[1];
    for( int i = 0; i < h->i_ref[0]; i++ )
        h->fdec->ref_poc[0][i] = h->fref[0][i]->i_poc;
    if( h->sh.i_type == SLICE_TYPE_B )
        for( int i = 0; i < h->i_ref[1]; i++ )
            h->fdec->ref_poc[1][i] = h->fref[1][i]->i_poc;

        map_col_to_list0(-1) = -1;
        map_col_to_list0(-2) = -2;
        for( int i = 0; i < h->fref[1][0]->i_ref[0]; i++ )
            int poc = h->fref[1][0]->ref_poc[0][i];
            map_col_to_list0(i) = -2;
            for( int j = 0; j < h->i_ref[0]; j++ )
                if( h->fref[0][j]->i_poc == poc )
                    map_col_to_list0(i) = j;
    else if( h->sh.i_type == SLICE_TYPE_P )
        memset( h->mb.cache.skip, 0, sizeof( h->mb.cache.skip ) );

        if( h->sh.i_disable_deblocking_filter_idc != 1 && h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
            deblock_ref_table(-2) = -2;
            deblock_ref_table(-1) = -1;
            for( int i = 0; i < h->i_ref[0] << SLICE_MBAFF; i++ )
                /* Mask off high bits to avoid frame num collisions with -1/-2.
                 * In current x264 frame num values don't cover a range of more
                 * than 32, so 6 bits is enough for uniqueness. */
                    deblock_ref_table(i) = h->fref[0][i]->i_frame_num&63;
                    deblock_ref_table(i) = ((h->fref[0][i>>1]->i_frame_num&63)<<1) + (i&1);

    /* init with not available (for top right idx=7,15) */
    memset( h->mb.cache.ref, -2, sizeof( h->mb.cache.ref ) );

    if( h->i_ref[0] > 0 )
        for( int field = 0; field <= SLICE_MBAFF; field++ )
            int curpoc = h->fdec->i_poc + h->fdec->i_delta_poc[field];
            int refpoc = h->fref[0][0]->i_poc + h->fref[0][0]->i_delta_poc[field];
            int delta = curpoc - refpoc;
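            /* (256 + delta/2) / delta is 256/delta rounded to nearest: a Q8
             * fixed-point reciprocal of the POC distance to the first list-0
             * reference. */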
            h->fdec->inv_ref_poc[field] = (256 + delta/2) / delta;

    h->mb.i_neighbour4[6] =
    h->mb.i_neighbour4[9] =
    h->mb.i_neighbour4[12] =
    h->mb.i_neighbour4[14] = MB_LEFT|MB_TOP|MB_TOPLEFT|MB_TOPRIGHT;
    h->mb.i_neighbour4[3] =
    h->mb.i_neighbour4[7] =
    h->mb.i_neighbour4[11] =
    h->mb.i_neighbour4[13] =
    h->mb.i_neighbour4[15] =
    h->mb.i_neighbour8[3] = MB_LEFT|MB_TOP|MB_TOPLEFT;

void x264_macroblock_thread_init( x264_t *h )
    h->mb.i_me_method = h->param.analyse.i_me_method;
    h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
    if( h->sh.i_type == SLICE_TYPE_B && (h->mb.i_subpel_refine == 6 || h->mb.i_subpel_refine == 8) )
        h->mb.i_subpel_refine--;
    h->mb.b_chroma_me = h->param.analyse.b_chroma_me &&
                        ((h->sh.i_type == SLICE_TYPE_P && h->mb.i_subpel_refine >= 5) ||
                         (h->sh.i_type == SLICE_TYPE_B && h->mb.i_subpel_refine >= 9));
    h->mb.b_dct_decimate = h->sh.i_type == SLICE_TYPE_B ||
                           (h->param.analyse.b_dct_decimate && h->sh.i_type != SLICE_TYPE_I);
    h->mb.i_mb_prev_xy = -1;

    h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
    h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
    h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
    h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE;

        h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 32*FENC_STRIDE;
        h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 36*FDEC_STRIDE;

        h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
        h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE + 16;

void x264_prefetch_fenc( x264_t *h, x264_frame_t *fenc, int i_mb_x, int i_mb_y )
    int stride_y = fenc->i_stride[0];
    int stride_uv = fenc->i_stride[1];
    int off_y = 16 * i_mb_x + 16 * i_mb_y * stride_y;
    int off_uv = 16 * i_mb_x + 8 * i_mb_y * stride_uv;
    h->mc.prefetch_fenc( fenc->plane[0]+off_y, stride_y,
                         fenc->plane[1]+off_uv, stride_uv, i_mb_x );

NOINLINE void x264_copy_column8( pixel *dst, pixel *src )
    // input pointers are offset by 4 rows because that's faster (smaller instruction size on x86)
    for( int i = -4; i < 4; i++ )
        dst[i*FDEC_STRIDE] = src[i*FDEC_STRIDE];

static void ALWAYS_INLINE x264_macroblock_load_pic_pointers( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
    int mb_interlaced = b_mbaff && MB_INTERLACED;
    int w = b_chroma ? 8 : 16;
    int i_stride = h->fdec->i_stride[i];
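    /* A field macroblock reads every other line of the frame, so the effective
     * stride doubles when this macroblock pair is coded interlaced. */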
    int i_stride2 = i_stride << mb_interlaced;
    int i_pix_offset = mb_interlaced
                     ? 16 * mb_x + w * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                     : 16 * mb_x + w * mb_y * i_stride;
    pixel *plane_fdec = &h->fdec->plane[i][i_pix_offset];
    int fdec_idx = b_mbaff ? (mb_interlaced ? (3 + (mb_y&1)) : (mb_y&1) ? 2 : 4) : 0;
    pixel *intra_fdec = &h->intra_border_backup[fdec_idx][i][mb_x*16];
    int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
    /* ref_pix_offset[0] references the current field and [1] the opposite field. */
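        /* (1-2*(mb_y&1)) evaluates to +1 for the top macroblock of a pair and -1
         * for the bottom one, i.e. one line towards the opposite-parity field. */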
        ref_pix_offset[1] += (1-2*(mb_y&1)) * i_stride;
    h->mb.pic.i_stride[i] = i_stride2;
    h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];

        h->mc.load_deinterleave_8x8x2_fenc( h->mb.pic.p_fenc[1], h->mb.pic.p_fenc_plane[1], i_stride2 );
        memcpy( h->mb.pic.p_fdec[1]-FDEC_STRIDE, intra_fdec, 8*sizeof(pixel) );
        memcpy( h->mb.pic.p_fdec[2]-FDEC_STRIDE, intra_fdec+8, 8*sizeof(pixel) );

            h->mb.pic.p_fdec[1][-FDEC_STRIDE-1] = intra_fdec[-1-8];
            h->mb.pic.p_fdec[2][-FDEC_STRIDE-1] = intra_fdec[-1];

        h->mc.copy[PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE, h->mb.pic.p_fenc_plane[i], i_stride2, 16 );
        memcpy( h->mb.pic.p_fdec[i]-FDEC_STRIDE, intra_fdec, 24*sizeof(pixel) );

            h->mb.pic.p_fdec[i][-FDEC_STRIDE-1] = intra_fdec[-1];

        for( int j = 0; j < w; j++ )
                h->mb.pic.p_fdec[1][-1+j*FDEC_STRIDE] = plane_fdec[-2+j*i_stride2];
                h->mb.pic.p_fdec[2][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];

                h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];

    pixel *plane_src, **filtered_src;
    for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
        // Interpolate between pixels in same field.
            plane_src = h->fref[0][j>>1]->plane_fld[i];
            filtered_src = h->fref[0][j>>1]->filtered_fld[i];

            plane_src = h->fref[0][j]->plane[i];
            filtered_src = h->fref[0][j]->filtered[i];
        h->mb.pic.p_fref[0][j][i*4] = plane_src + ref_pix_offset[j&1];

            for( int k = 1; k < 4; k++ )
                h->mb.pic.p_fref[0][j][i*4+k] = filtered_src[k] + ref_pix_offset[j&1];

        if( h->sh.weight[j][0].weightfn )
            h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> mb_interlaced][ref_pix_offset[j&1]];
            h->mb.pic.p_fref_w[j] = h->mb.pic.p_fref[0][j][0];

    if( h->sh.i_type == SLICE_TYPE_B )
        for( int j = 0; j < h->mb.pic.i_fref[1]; j++ )
                plane_src = h->fref[1][j>>1]->plane_fld[i];
                filtered_src = h->fref[1][j>>1]->filtered_fld[i];

                plane_src = h->fref[1][j]->plane[i];
                filtered_src = h->fref[1][j]->filtered[i];
            h->mb.pic.p_fref[1][j][i*4] = plane_src + ref_pix_offset[j&1];

            for( int k = 1; k < 4; k++ )
                h->mb.pic.p_fref[1][j][i*4+k] = filtered_src[k] + ref_pix_offset[j&1];
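
/* Lookup tables for reading the left neighbour under MBAFF: for each of the four
 * 4x4 rows along the current MB's left edge they give the index of the left
 * neighbour's intra pred mode, luma nnz, chroma nnz, 4x4 mv row and 8x8 ref row
 * to use, with one table per field/frame combination of the current and left
 * macroblocks (this column ordering is inferred from how the table is indexed
 * below, not stated in the original). */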
static const x264_left_table_t left_indices[4] =
    /* Current is progressive */
    {{ 4, 4, 5, 5}, { 3, 3, 7, 7}, {16+1, 16+1, 32+1, 32+1}, {0, 0, 1, 1}, {0, 0, 0, 0}},
    {{ 6, 6, 3, 3}, {11, 11, 15, 15}, {16+5, 16+5, 32+5, 32+5}, {2, 2, 3, 3}, {1, 1, 1, 1}},
    /* Current is interlaced */
    {{ 4, 6, 4, 6}, { 3, 11, 3, 11}, {16+1, 16+1, 32+1, 32+1}, {0, 2, 0, 2}, {0, 1, 0, 1}},
    {{ 4, 5, 6, 3}, { 3, 7, 11, 15}, {16+1, 16+5, 32+1, 32+5}, {0, 1, 2, 3}, {0, 0, 1, 1}}
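
/* Work out which neighbouring macroblocks exist and are usable, filling in the
 * i_mb_*_xy / i_mb_type_* fields and the three neighbour masks (frame-wise,
 * slice-restricted, and intra-restricted for constrained intra prediction), with
 * the extra field/frame bookkeeping that MBAFF requires. */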
static void ALWAYS_INLINE x264_macroblock_cache_load_neighbours( x264_t *h, int mb_x, int mb_y, int b_interlaced )
    const int mb_interlaced = b_interlaced && MB_INTERLACED;
    int top_y = mb_y - (1 << mb_interlaced);
    int top = top_y * h->mb.i_mb_stride + mb_x;

    h->mb.i_mb_xy = mb_y * h->mb.i_mb_stride + mb_x;
    h->mb.i_b8_xy = 2*(mb_y * h->mb.i_b8_stride + mb_x);
    h->mb.i_b4_xy = 4*(mb_y * h->mb.i_b4_stride + mb_x);
    h->mb.left_b8[1] = -1;
    h->mb.left_b4[1] = -1;
    h->mb.i_neighbour = 0;
    h->mb.i_neighbour_intra = 0;
    h->mb.i_neighbour_frame = 0;
    h->mb.i_mb_top_xy = -1;
    h->mb.i_mb_top_y = -1;
    h->mb.i_mb_left_xy[0] = h->mb.i_mb_left_xy[1] = -1;
    h->mb.i_mb_topleft_xy = -1;
    h->mb.i_mb_topright_xy = -1;
    h->mb.i_mb_type_top = -1;
    h->mb.i_mb_type_left[0] = h->mb.i_mb_type_left[1] = -1;
    h->mb.i_mb_type_topleft = -1;
    h->mb.i_mb_type_topright = -1;
    h->mb.left_index_table = &left_indices[3];
    h->mb.topleft_partition = 0;

    int topleft_y = top_y;
    int topright_y = top_y;

        left[0] = left[1] = h->mb.i_mb_xy - 1;
        h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2;
        h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4;

            h->mb.i_mb_top_mbpair_xy = h->mb.i_mb_xy - 2*h->mb.i_mb_stride;
            h->mb.i_mb_topleft_y = -1;
            h->mb.i_mb_topright_y = -1;

            if( mb_x && mb_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
                left[0] = left[1] = h->mb.i_mb_xy - 1 - h->mb.i_mb_stride;
                h->mb.left_b8[0] = h->mb.left_b8[1] = h->mb.i_b8_xy - 2 - 2*h->mb.i_b8_stride;
                h->mb.left_b4[0] = h->mb.left_b4[1] = h->mb.i_b4_xy - 4 - 4*h->mb.i_b4_stride;

                    h->mb.left_index_table = &left_indices[2];
                    left[1] += h->mb.i_mb_stride;
                    h->mb.left_b8[1] += 2*h->mb.i_b8_stride;
                    h->mb.left_b4[1] += 4*h->mb.i_b4_stride;

                    h->mb.left_index_table = &left_indices[1];
                    h->mb.topleft_partition = 1;

            if( mb_interlaced && top >= 0 )
                if( !h->mb.field[top] )
                    top += h->mb.i_mb_stride;
                topleft_y += !h->mb.field[h->mb.i_mb_stride*topleft_y + mb_x - 1];
                if( mb_x < h->mb.i_mb_width-1 )
                    topright_y += !h->mb.field[h->mb.i_mb_stride*topright_y + mb_x + 1];

            if( mb_x && mb_interlaced != h->mb.field[h->mb.i_mb_xy-1] )
                    h->mb.left_index_table = &left_indices[2];
                    left[1] += h->mb.i_mb_stride;
                    h->mb.left_b8[1] += 2*h->mb.i_b8_stride;
                    h->mb.left_b4[1] += 4*h->mb.i_b4_stride;

                h->mb.left_index_table = &left_indices[0];

        h->mb.i_neighbour_frame |= MB_LEFT;
        h->mb.i_mb_left_xy[0] = left[0];
        h->mb.i_mb_left_xy[1] = left[1];
        h->mb.i_mb_type_left[0] = h->mb.type[h->mb.i_mb_left_xy[0]];
        h->mb.i_mb_type_left[1] = h->mb.type[h->mb.i_mb_left_xy[1]];
        if( h->mb.slice_table[left[0]] == h->sh.i_first_mb )
            h->mb.i_neighbour |= MB_LEFT;

            // FIXME: We don't currently support constrained intra + mbaff.
            if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_left[0] ) )
                h->mb.i_neighbour_intra |= MB_LEFT;

    /* We can't predict from the previous threadslice since it hasn't been encoded yet. */
    if( (h->i_threadslice_start >> mb_interlaced) != (mb_y >> mb_interlaced) )

        h->mb.i_neighbour_frame |= MB_TOP;
        h->mb.i_mb_top_xy = top;
        h->mb.i_mb_top_y = top_y;
        h->mb.i_mb_type_top = h->mb.type[h->mb.i_mb_top_xy];
        if( h->mb.slice_table[top] == h->sh.i_first_mb )
            h->mb.i_neighbour |= MB_TOP;

            if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_top ) )
                h->mb.i_neighbour_intra |= MB_TOP;

            /* We only need to prefetch the top blocks because the left was just written
             * to as part of the previous cache_save. Since most target CPUs use write-allocate
             * caches, left blocks are near-guaranteed to be in L1 cache. Top--not so much. */
            x264_prefetch( &h->mb.cbp[top] );
            x264_prefetch( h->mb.intra4x4_pred_mode[top] );
            x264_prefetch( &h->mb.non_zero_count[top][12] );
            /* These aren't always allocated, but prefetching an invalid address can't hurt. */
            x264_prefetch( &h->mb.mb_transform_size[top] );
            x264_prefetch( &h->mb.skipbp[top] );

    if( mb_x > 0 && topleft_y >= 0 )
        h->mb.i_neighbour_frame |= MB_TOPLEFT;
        h->mb.i_mb_topleft_xy = h->mb.i_mb_stride*topleft_y + mb_x - 1;
        h->mb.i_mb_topleft_y = topleft_y;
        h->mb.i_mb_type_topleft = h->mb.type[h->mb.i_mb_topleft_xy];
        if( h->mb.slice_table[h->mb.i_mb_topleft_xy] == h->sh.i_first_mb )
            h->mb.i_neighbour |= MB_TOPLEFT;

            if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topleft ) )
                h->mb.i_neighbour_intra |= MB_TOPLEFT;

    if( mb_x < h->mb.i_mb_width - 1 && topright_y >= 0 )
        h->mb.i_neighbour_frame |= MB_TOPRIGHT;
        h->mb.i_mb_topright_xy = h->mb.i_mb_stride*topright_y + mb_x + 1;
        h->mb.i_mb_topright_y = topright_y;
        h->mb.i_mb_type_topright = h->mb.type[h->mb.i_mb_topright_xy];
        if( h->mb.slice_table[h->mb.i_mb_topright_xy] == h->sh.i_first_mb )
            h->mb.i_neighbour |= MB_TOPRIGHT;

            if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topright ) )
                h->mb.i_neighbour_intra |= MB_TOPRIGHT;
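
/* Load everything the analysis and encoding of one macroblock needs into the
 * fixed-layout per-MB cache: neighbouring intra modes, nnz, cbp, motion vectors,
 * reference indices and mvd, plus the pixel pointers for the current and
 * reference frames, so later code can index it all through x264_scan8. */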
void ALWAYS_INLINE x264_macroblock_cache_load( x264_t *h, int mb_x, int mb_y, int b_mbaff )
    x264_macroblock_cache_load_neighbours( h, mb_x, mb_y, b_mbaff );

    int *left = h->mb.i_mb_left_xy;
    int top = h->mb.i_mb_top_xy;
    int top_y = h->mb.i_mb_top_y;
    int s8x8 = h->mb.i_b8_stride;
    int s4x4 = h->mb.i_b4_stride;
    int top_8x8 = (2*top_y+1) * s8x8 + 2*mb_x;
    int top_4x4 = (4*top_y+3) * s4x4 + 4*mb_x;
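    /* Number of reference lists to load: one for P slices, two for B slices,
     * none for I slices. */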
    int lists = (1 << h->sh.i_type) & 3;

    /* GCC pessimizes direct loads from heap-allocated arrays due to aliasing. */
    /* By only dereferencing them once, we avoid this issue. */
    int8_t (*i4x4)[8] = h->mb.intra4x4_pred_mode;
    uint8_t (*nnz)[48] = h->mb.non_zero_count;
    int16_t *cbp = h->mb.cbp;

    const x264_left_table_t *left_index_table = h->mb.left_index_table;

    if( h->mb.i_neighbour & MB_TOP )
        h->mb.cache.i_cbp_top = cbp[top];

        CP32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8], &i4x4[top][0] );

        /* load non_zero_count */
        CP32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8], &nnz[top][12] );
        CP32( &h->mb.cache.non_zero_count[x264_scan8[16] - 8], &nnz[top][16+4 + 8*CHROMA444] );
        CP32( &h->mb.cache.non_zero_count[x264_scan8[32] - 8], &nnz[top][32+4 + 8*CHROMA444] );

        /* Finish the prefetching */
        for( int l = 0; l < lists; l++ )
            x264_prefetch( &h->mb.mv[l][top_4x4-1] );
            /* Top right being not in the same cacheline as top left will happen
             * once every 4 MBs, so one extra prefetch is worthwhile */
            x264_prefetch( &h->mb.mv[l][top_4x4+4] );
            x264_prefetch( &h->mb.ref[l][top_8x8-1] );
            x264_prefetch( &h->mb.mvd[l][top] );

        h->mb.cache.i_cbp_top = -1;

        M32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8] ) = 0xFFFFFFFFU;

        /* load non_zero_count */
        M32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8] ) = 0x80808080U;
        M32( &h->mb.cache.non_zero_count[x264_scan8[16] - 8] ) = 0x80808080U;
        M32( &h->mb.cache.non_zero_count[x264_scan8[32] - 8] ) = 0x80808080U;

    if( h->mb.i_neighbour & MB_LEFT )
        int ltop = left[LTOP];
        int lbot = b_mbaff ? left[LBOT] : ltop;

            const int16_t top_luma = (cbp[ltop] >> (left_index_table->mv[0]&(~1))) & 2;
            const int16_t bot_luma = (cbp[lbot] >> (left_index_table->mv[2]&(~1))) & 2;
            h->mb.cache.i_cbp_left = (cbp[ltop] & 0xfff0) | (bot_luma<<2) | top_luma;

            h->mb.cache.i_cbp_left = cbp[ltop];

        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 0] - 1] = i4x4[ltop][left_index_table->intra[0]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 2] - 1] = i4x4[ltop][left_index_table->intra[1]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 8] - 1] = i4x4[lbot][left_index_table->intra[2]];
        h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = i4x4[lbot][left_index_table->intra[3]];

        /* load non_zero_count */
        h->mb.cache.non_zero_count[x264_scan8[ 0] - 1] = nnz[ltop][left_index_table->nnz[0]];
        h->mb.cache.non_zero_count[x264_scan8[ 2] - 1] = nnz[ltop][left_index_table->nnz[1]];
        h->mb.cache.non_zero_count[x264_scan8[ 8] - 1] = nnz[lbot][left_index_table->nnz[2]];
        h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[lbot][left_index_table->nnz[3]];

            h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] = nnz[ltop][left_index_table->nnz[0]+16];
            h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] = nnz[ltop][left_index_table->nnz[1]+16];
            h->mb.cache.non_zero_count[x264_scan8[16+ 8] - 1] = nnz[lbot][left_index_table->nnz[2]+16];
            h->mb.cache.non_zero_count[x264_scan8[16+10] - 1] = nnz[lbot][left_index_table->nnz[3]+16];
            h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] = nnz[ltop][left_index_table->nnz[0]+32];
            h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = nnz[ltop][left_index_table->nnz[1]+32];
            h->mb.cache.non_zero_count[x264_scan8[32+ 8] - 1] = nnz[lbot][left_index_table->nnz[2]+32];
            h->mb.cache.non_zero_count[x264_scan8[32+10] - 1] = nnz[lbot][left_index_table->nnz[3]+32];

            h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] = nnz[ltop][left_index_table->nnz_chroma[0]];
            h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] = nnz[lbot][left_index_table->nnz_chroma[1]];
            h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] = nnz[ltop][left_index_table->nnz_chroma[2]];
            h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = nnz[lbot][left_index_table->nnz_chroma[3]];

        h->mb.cache.i_cbp_left = -1;

        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 0] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 2] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[ 8] - 1] =
        h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = -1;

        /* load non_zero_count */
        h->mb.cache.non_zero_count[x264_scan8[ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[ 2] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[ 8] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[10] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[16+ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[16+ 2] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[32+ 0] - 1] =
        h->mb.cache.non_zero_count[x264_scan8[32+ 2] - 1] = 0x80;

            h->mb.cache.non_zero_count[x264_scan8[16+ 8] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[16+10] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[32+ 8] - 1] =
            h->mb.cache.non_zero_count[x264_scan8[32+10] - 1] = 0x80;

    if( h->pps->b_transform_8x8_mode )
        h->mb.cache.i_neighbour_transform_size =
            ( (h->mb.i_neighbour & MB_LEFT) && h->mb.mb_transform_size[left[0]] )
          + ( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size[top] );

        h->mb.pic.i_fref[0] = h->i_ref[0] << MB_INTERLACED;
        h->mb.pic.i_fref[1] = h->i_ref[1] << MB_INTERLACED;

        x264_copy_column8( h->mb.pic.p_fdec[0]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+ 4*FDEC_STRIDE );
        x264_copy_column8( h->mb.pic.p_fdec[0]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+12*FDEC_STRIDE );
        x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 0, 0, 0 );

            x264_copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+15+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[1]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[1]+15+12*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+15+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[2]+15+12*FDEC_STRIDE );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 0, 0 );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 2, 0, 0 );

            x264_copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+ 4*FDEC_STRIDE );
            x264_copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+ 4*FDEC_STRIDE );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 1, 0 );

        x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 0, 0, 1 );

            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 0, 1 );
            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 2, 0, 1 );

            x264_macroblock_load_pic_pointers( h, mb_x, mb_y, 1, 1, 1 );

    if( h->fdec->integral )
        int offset = 16 * (mb_x + mb_y * h->fdec->i_stride[0]);
        for( int list = 0; list < 2; list++ )
            for( int i = 0; i < h->mb.pic.i_fref[list]; i++ )
                h->mb.pic.p_integral[list][i] = &h->fref[list][i]->integral[offset];

    x264_prefetch_fenc( h, h->fenc, mb_x, mb_y );

    /* load ref/mv/mvd */
    for( int l = 0; l < lists; l++ )
        int16_t (*mv)[2] = h->mb.mv[l];
        int8_t *ref = h->mb.ref[l];

        int i8 = x264_scan8[0] - 1 - 1*8;
        if( h->mb.i_neighbour & MB_TOPLEFT )
            int ir = b_mbaff ? 2*(s8x8*h->mb.i_mb_topleft_y + mb_x-1)+1+s8x8 : top_8x8 - 1;
            int iv = b_mbaff ? 4*(s4x4*h->mb.i_mb_topleft_y + mb_x-1)+3+3*s4x4 : top_4x4 - 1;
            if( b_mbaff && h->mb.topleft_partition )
                /* Take motion vector from the middle of macroblock instead of
                 * the bottom right as usual. */
            h->mb.cache.ref[l][i8] = ref[ir];
            CP32( h->mb.cache.mv[l][i8], mv[iv] );

            h->mb.cache.ref[l][i8] = -2;
            M32( h->mb.cache.mv[l][i8] ) = 0;

        i8 = x264_scan8[0] - 8;
        if( h->mb.i_neighbour & MB_TOP )
            h->mb.cache.ref[l][i8+0] =
            h->mb.cache.ref[l][i8+1] = ref[top_8x8 + 0];
            h->mb.cache.ref[l][i8+2] =
            h->mb.cache.ref[l][i8+3] = ref[top_8x8 + 1];
            CP128( h->mb.cache.mv[l][i8], mv[top_4x4] );

            M128( h->mb.cache.mv[l][i8] ) = M128_ZERO;
            M32( &h->mb.cache.ref[l][i8] ) = (uint8_t)(-2) * 0x01010101U;

        i8 = x264_scan8[0] + 4 - 1*8;
        if( h->mb.i_neighbour & MB_TOPRIGHT )
            int ir = b_mbaff ? 2*(s8x8*h->mb.i_mb_topright_y + (mb_x+1))+s8x8 : top_8x8 + 2;
            int iv = b_mbaff ? 4*(s4x4*h->mb.i_mb_topright_y + (mb_x+1))+3*s4x4 : top_4x4 + 4;
            h->mb.cache.ref[l][i8] = ref[ir];
            CP32( h->mb.cache.mv[l][i8], mv[iv] );

            h->mb.cache.ref[l][i8] = -2;

        i8 = x264_scan8[0] - 1;
        if( h->mb.i_neighbour & MB_LEFT )
                h->mb.cache.ref[l][i8+0*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[0]];
                h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[LTOP] + 1 + s8x8*left_index_table->ref[1]];
                h->mb.cache.ref[l][i8+2*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[2]];
                h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[LBOT] + 1 + s8x8*left_index_table->ref[3]];

                CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[LTOP] + 3 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[2]] );
                CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[LBOT] + 3 + s4x4*left_index_table->mv[3]] );

                const int ir = h->mb.i_b8_xy - 1;
                const int iv = h->mb.i_b4_xy - 1;
                h->mb.cache.ref[l][i8+0*8] =
                h->mb.cache.ref[l][i8+1*8] = ref[ir + 0*s8x8];
                h->mb.cache.ref[l][i8+2*8] =
                h->mb.cache.ref[l][i8+3*8] = ref[ir + 1*s8x8];

                CP32( h->mb.cache.mv[l][i8+0*8], mv[iv + 0*s4x4] );
                CP32( h->mb.cache.mv[l][i8+1*8], mv[iv + 1*s4x4] );
                CP32( h->mb.cache.mv[l][i8+2*8], mv[iv + 2*s4x4] );
                CP32( h->mb.cache.mv[l][i8+3*8], mv[iv + 3*s4x4] );

            for( int i = 0; i < 4; i++ )
                h->mb.cache.ref[l][i8+i*8] = -2;
                M32( h->mb.cache.mv[l][i8+i*8] ) = 0;
        /* Extra logic for top right mv in mbaff.
         * If the usual top-right entries for the relevant 4x4 partitions do not
         * exist, but the corresponding entries in the macroblock to the left do,
         * then use those instead. */
        if( b_mbaff && (h->mb.i_neighbour & MB_LEFT) )
            if( MB_INTERLACED && !h->mb.field[h->mb.i_mb_xy-1] )
                h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*0];
                h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*1];
                h->mb.cache.topright_ref[l][2] = ref[h->mb.left_b8[1] + 1 + s8x8*0];
                CP32( h->mb.cache.topright_mv[l][0], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[0]+1)] );
                CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*(left_index_table->mv[1]+1)] );
                CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[1] + 3 + s4x4*(left_index_table->mv[2]+1)] );
            else if( !MB_INTERLACED && h->mb.field[h->mb.i_mb_xy-1] )
                // Looking at the bottom field so always take the bottom macroblock of the pair.
                h->mb.cache.topright_ref[l][0] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[0]];
                h->mb.cache.topright_ref[l][1] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[0]];
                h->mb.cache.topright_ref[l][2] = ref[h->mb.left_b8[0] + 1 + s8x8*2 + s8x8*left_index_table->ref[2]];
                CP32( h->mb.cache.topright_mv[l][0], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[0]] );
                CP32( h->mb.cache.topright_mv[l][1], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[1]] );
                CP32( h->mb.cache.topright_mv[l][2], mv[h->mb.left_b4[0] + 3 + s4x4*4 + s4x4*left_index_table->mv[2]] );

        if( h->param.b_cabac )
            uint8_t (*mvd)[8][2] = h->mb.mvd[l];
            if( h->mb.i_neighbour & MB_TOP )
                CP64( h->mb.cache.mvd[l][x264_scan8[0] - 8], mvd[top][0] );
                M64( h->mb.cache.mvd[l][x264_scan8[0] - 8] ) = 0;

            if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1] >= 0) )
                CP16( h->mb.cache.mvd[l][x264_scan8[0 ] - 1], mvd[left[LTOP]][left_index_table->intra[0]] );
                CP16( h->mb.cache.mvd[l][x264_scan8[2 ] - 1], mvd[left[LTOP]][left_index_table->intra[1]] );

                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+0*8] ) = 0;
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+1*8] ) = 0;
            if( h->mb.i_neighbour & MB_LEFT && (!b_mbaff || h->mb.cache.ref[l][x264_scan8[0]-1+2*8] >=0) )
                CP16( h->mb.cache.mvd[l][x264_scan8[8 ] - 1], mvd[left[LBOT]][left_index_table->intra[2]] );
                CP16( h->mb.cache.mvd[l][x264_scan8[10] - 1], mvd[left[LBOT]][left_index_table->intra[3]] );

                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+2*8] ) = 0;
                M16( h->mb.cache.mvd[l][x264_scan8[0]-1+3*8] ) = 0;

            /* If motion vectors are cached from frame macroblocks but this
             * macroblock is a field macroblock then the motion vector must be
             * halved. Similarly, motion vectors from field macroblocks are doubled. */
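            /* e.g. a field macroblock reading a frame neighbour doubles the
             * reference index (each frame counts as two fields) and halves the
             * vertical MV and mvd components, as the first MAP_F2F below does;
             * the second mapping performs the opposite conversion. */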
                if( FIELD_DIFFERENT(h->mb.i_mb_topleft_xy) )\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 - 1*8)\
                if( FIELD_DIFFERENT(top) )\
                    MAP_F2F(mv, ref, x264_scan8[0] + 0 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 1 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 2 - 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] + 3 - 1*8)\
                if( FIELD_DIFFERENT(h->mb.i_mb_topright_xy) )\
                    MAP_F2F(mv, ref, x264_scan8[0] + 4 - 1*8)\
                if( FIELD_DIFFERENT(left[0]) )\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 0*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 1*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 2*8)\
                    MAP_F2F(mv, ref, x264_scan8[0] - 1 + 3*8)\
                    MAP_F2F(topright_mv, topright_ref, 0)\
                    MAP_F2F(topright_mv, topright_ref, 1)\
                    MAP_F2F(topright_mv, topright_ref, 2)\

#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && !h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
                    if( h->mb.cache.varref[l][index] >= 0 )\
                        h->mb.cache.varref[l][index] <<= 1;\
                        h->mb.cache.varmv[l][index][1] /= 2;\
                        h->mb.cache.mvd[l][index][1] >>= 1;\
#undef FIELD_DIFFERENT

#define FIELD_DIFFERENT(macroblock) (macroblock >= 0 && h->mb.field[macroblock])
#define MAP_F2F(varmv, varref, index)\
                    if( h->mb.cache.varref[l][index] >= 0 )\
                        h->mb.cache.varref[l][index] >>= 1;\
                        h->mb.cache.varmv[l][index][1] <<= 1;\
                        h->mb.cache.mvd[l][index][1] <<= 1;\
#undef FIELD_DIFFERENT

    if( b_mbaff && mb_x == 0 && !(mb_y&1) && mb_y > 0 )
        h->mb.field_decoding_flag = h->mb.field[h->mb.i_mb_xy - h->mb.i_mb_stride];

    /* Check whether skip here would cause decoder to predict interlace mode incorrectly.
     * FIXME: It might be better to change the interlace type rather than forcing a skip to be non-skip. */
    h->mb.b_allow_skip = 1;

        if( MB_INTERLACED != h->mb.field_decoding_flag &&
            h->mb.i_mb_prev_xy >= 0 && IS_SKIP(h->mb.type[h->mb.i_mb_prev_xy]) )
            h->mb.b_allow_skip = 0;
        if( (mb_y&1) && IS_SKIP(h->mb.type[h->mb.i_mb_xy - h->mb.i_mb_stride]) )
            if( h->mb.i_neighbour & MB_LEFT )
                if( h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED )
                    h->mb.b_allow_skip = 0;
            else if( h->mb.i_neighbour & MB_TOP )
                if( h->mb.field[h->mb.i_mb_top_xy] != MB_INTERLACED )
                    h->mb.b_allow_skip = 0;
            else // Frame mb pair is predicted
                    h->mb.b_allow_skip = 0;

    if( h->param.b_cabac )
            int left_xy, top_xy;
            /* Neighbours here are calculated based on field_decoding_flag */
            int mb_xy = mb_x + (mb_y&~1)*h->mb.i_mb_stride;
            left_xy = mb_xy - 1;
            if( (mb_y&1) && mb_x > 0 && h->mb.field_decoding_flag == h->mb.field[left_xy] )
                left_xy += h->mb.i_mb_stride;
            if( h->mb.field_decoding_flag )
                top_xy = mb_xy - h->mb.i_mb_stride;
                if( !(mb_y&1) && top_xy >= 0 && h->mb.slice_table[top_xy] == h->sh.i_first_mb && h->mb.field[top_xy] )
                    top_xy -= h->mb.i_mb_stride;
                top_xy = mb_x + (mb_y-1)*h->mb.i_mb_stride;

            h->mb.cache.i_neighbour_skip = (mb_x > 0 && h->mb.slice_table[left_xy] == h->sh.i_first_mb && !IS_SKIP( h->mb.type[left_xy] ))
                                         + (top_xy >= 0 && h->mb.slice_table[top_xy] == h->sh.i_first_mb && !IS_SKIP( h->mb.type[top_xy] ));

            h->mb.cache.i_neighbour_skip = ((h->mb.i_neighbour & MB_LEFT) && !IS_SKIP( h->mb.i_mb_type_left[0] ))
                                         + ((h->mb.i_neighbour & MB_TOP) && !IS_SKIP( h->mb.i_mb_type_top ));

    if( h->sh.i_type == SLICE_TYPE_B )
        h->mb.bipred_weight = h->mb.bipred_weight_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
        h->mb.dist_scale_factor = h->mb.dist_scale_factor_buf[MB_INTERLACED][MB_INTERLACED&(mb_y&1)];
        if( h->param.b_cabac )
            x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );

                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[LTOP]] : 0;
                h->mb.cache.skip[x264_scan8[0] - 1] = (skipbp >> (1+(left_index_table->mv[0]&~1))) & 1;
                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[LBOT]] : 0;
                h->mb.cache.skip[x264_scan8[8] - 1] = (skipbp >> (1+(left_index_table->mv[2]&~1))) & 1;

                skipbp = (h->mb.i_neighbour & MB_LEFT) ? h->mb.skipbp[left[0]] : 0;
                h->mb.cache.skip[x264_scan8[0] - 1] = skipbp & 0x2;
                h->mb.cache.skip[x264_scan8[8] - 1] = skipbp & 0x8;

            skipbp = (h->mb.i_neighbour & MB_TOP) ? h->mb.skipbp[top] : 0;
            h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
            h->mb.cache.skip[x264_scan8[4] - 8] = skipbp & 0x8;

    if( h->sh.i_type == SLICE_TYPE_P )
        x264_mb_predict_mv_pskip( h, h->mb.cache.pskip_mv );

    h->mb.i_neighbour4[0] =
    h->mb.i_neighbour8[0] = (h->mb.i_neighbour_intra & (MB_TOP|MB_LEFT|MB_TOPLEFT))
                            | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOPRIGHT : 0);
    h->mb.i_neighbour4[4] =
    h->mb.i_neighbour4[1] = MB_LEFT | ((h->mb.i_neighbour_intra & MB_TOP) ? (MB_TOP|MB_TOPLEFT|MB_TOPRIGHT) : 0);
    h->mb.i_neighbour4[2] =
    h->mb.i_neighbour4[8] =
    h->mb.i_neighbour4[10] =
    h->mb.i_neighbour8[2] = MB_TOP|MB_TOPRIGHT | ((h->mb.i_neighbour_intra & MB_LEFT) ? (MB_LEFT|MB_TOPLEFT) : 0);
    h->mb.i_neighbour4[5] =
    h->mb.i_neighbour8[1] = MB_LEFT | (h->mb.i_neighbour_intra & MB_TOPRIGHT)
                            | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOP|MB_TOPLEFT : 0);

void x264_macroblock_cache_load_progressive( x264_t *h, int mb_x, int mb_y )
    x264_macroblock_cache_load( h, mb_x, mb_y, 0 );

void x264_macroblock_cache_load_interlaced( x264_t *h, int mb_x, int mb_y )
    x264_macroblock_cache_load( h, mb_x, mb_y, 1 );

void x264_macroblock_cache_load_neighbours_deblock( x264_t *h, int mb_x, int mb_y )
    int deblock_on_slice_edges = h->sh.i_disable_deblocking_filter_idc != 2;

    h->mb.i_neighbour = 0;
    h->mb.i_mb_xy = mb_y * h->mb.i_mb_stride + mb_x;
    h->mb.b_interlaced = PARAM_INTERLACED && h->mb.field[h->mb.i_mb_xy];
    h->mb.i_mb_top_y = mb_y - (1 << MB_INTERLACED);
    h->mb.i_mb_top_xy = mb_x + h->mb.i_mb_stride*h->mb.i_mb_top_y;
    h->mb.i_mb_left_xy[1] =
    h->mb.i_mb_left_xy[0] = h->mb.i_mb_xy - 1;

        if( mb_x && h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED )
            h->mb.i_mb_left_xy[0] -= h->mb.i_mb_stride;

        if( h->mb.i_mb_top_xy >= 0 && MB_INTERLACED && !h->mb.field[h->mb.i_mb_top_xy] )
            h->mb.i_mb_top_xy += h->mb.i_mb_stride;

        if( mb_x && h->mb.field[h->mb.i_mb_xy - 1] != MB_INTERLACED )
            h->mb.i_mb_left_xy[1] += h->mb.i_mb_stride;

    if( mb_x > 0 && (deblock_on_slice_edges ||
        h->mb.slice_table[h->mb.i_mb_left_xy[0]] == h->mb.slice_table[h->mb.i_mb_xy]) )
        h->mb.i_neighbour |= MB_LEFT;
    if( mb_y > MB_INTERLACED && (deblock_on_slice_edges
        || h->mb.slice_table[h->mb.i_mb_top_xy] == h->mb.slice_table[h->mb.i_mb_xy]) )
        h->mb.i_neighbour |= MB_TOP;

static void x264_macroblock_deblock_strength_mbaff( x264_t *h, uint8_t (*bs)[8][4] )
    if( (h->mb.i_neighbour & MB_LEFT) && h->mb.field[h->mb.i_mb_left_xy[0]] != MB_INTERLACED )
        static const uint8_t offset[2][2][8] =
        { { { 0, 0, 0, 0, 1, 1, 1, 1 },
            { 2, 2, 2, 2, 3, 3, 3, 3 }, },
          { { 0, 1, 2, 3, 0, 1, 2, 3 },
            { 0, 1, 2, 3, 0, 1, 2, 3 }, }
        ALIGNED_ARRAY_8( uint8_t, tmpbs, [8] );

        const uint8_t *off = offset[MB_INTERLACED][h->mb.i_mb_y&1];
        uint8_t (*nnz)[48] = h->mb.non_zero_count;

        for( int i = 0; i < 8; i++ )
            int left = h->mb.i_mb_left_xy[MB_INTERLACED ? i>>2 : i&1];
            int nnz_this = h->mb.cache.non_zero_count[x264_scan8[0]+8*(i>>1)];
            int nnz_left = nnz[left][3 + 4*off[i]];
            if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
                if( h->mb.mb_transform_size[left] )
                    nnz_left = !!(M16( &nnz[left][2+4*j] ) | M16( &nnz[left][2+4*(1+j)] ));
            tmpbs[i] = (nnz_left || nnz_this) ? 2 : 1;

            CP32( bs[0][0], &tmpbs[0] );
            CP32( bs[0][4], &tmpbs[4] );

            for( int i = 0; i < 4; i++ ) bs[0][0][i] = tmpbs[2*i];
            for( int i = 0; i < 4; i++ ) bs[0][4][i] = tmpbs[1+2*i];

    if( (h->mb.i_neighbour & MB_TOP) && MB_INTERLACED != h->mb.field[h->mb.i_mb_top_xy] )
        if( !(h->mb.i_mb_y&1) && !MB_INTERLACED )
            /* Need to filter both fields (even for frame macroblocks).
             * Filter top two rows using the top macroblock of the above
             * pair and then the bottom one. */
            int mbn_xy = h->mb.i_mb_xy - 2 * h->mb.i_mb_stride;
            uint8_t *nnz_cur = &h->mb.cache.non_zero_count[x264_scan8[0]];

            for( int j = 0; j < 2; j++, mbn_xy += h->mb.i_mb_stride )
                uint8_t (*nnz)[48] = h->mb.non_zero_count;

                ALIGNED_4( uint8_t nnz_top[4] );
                CP32( nnz_top, &nnz[mbn_xy][3*4] );

                if( !h->param.b_cabac && h->pps->b_transform_8x8_mode && h->mb.mb_transform_size[mbn_xy] )
                    int nnz_top0 = M16( &nnz[mbn_xy][8] ) | M16( &nnz[mbn_xy][12] );
                    int nnz_top1 = M16( &nnz[mbn_xy][10] ) | M16( &nnz[mbn_xy][14] );
                    nnz_top[0] = nnz_top[1] = nnz_top0 ? 0x0101 : 0;
                    nnz_top[2] = nnz_top[3] = nnz_top1 ? 0x0101 : 0;

                for( int i = 0; i < 4; i++ )
                    bs[1][4*j][i] = (nnz_cur[i] || nnz_top[i]) ? 2 : 1;

            for( int i = 0; i < 4; i++ )
                bs[1][0][i] = X264_MAX( bs[1][0][i], 1 );
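
/* Compute deblocking boundary strengths for the current macroblock: intra MBs
 * simply get strength 3 on all the internal edges set here, otherwise the
 * strengths are derived from the cached nnz / ref / mv data (reloaded or munged
 * first where multiple slices, MBAFF or CAVLC+8x8dct make the cached values
 * unsuitable). */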
void x264_macroblock_deblock_strength( x264_t *h )
    uint8_t (*bs)[8][4] = h->deblock_strength[h->mb.i_mb_y&1][h->mb.i_mb_x];
    if( IS_INTRA( h->mb.type[h->mb.i_mb_xy] ) )
        memset( bs[0][1], 3, 3*4*sizeof(uint8_t) );
        memset( bs[1][1], 3, 3*4*sizeof(uint8_t) );

    /* If we have multiple slices and we're deblocking on slice edges, we
     * have to reload neighbour data. */
    if( SLICE_MBAFF || (h->sh.i_first_mb && h->sh.i_disable_deblocking_filter_idc != 2) )
        int old_neighbour = h->mb.i_neighbour;
        int mb_x = h->mb.i_mb_x;
        int mb_y = h->mb.i_mb_y;
        x264_macroblock_cache_load_neighbours_deblock( h, mb_x, mb_y );
        int new_neighbour = h->mb.i_neighbour;
        h->mb.i_neighbour &= ~old_neighbour;
        if( h->mb.i_neighbour )
            int top_y = h->mb.i_mb_top_y;
            int top_8x8 = (2*top_y+1) * h->mb.i_b8_stride + 2*mb_x;
            int top_4x4 = (4*top_y+3) * h->mb.i_b4_stride + 4*mb_x;
            int s8x8 = h->mb.i_b8_stride;
            int s4x4 = h->mb.i_b4_stride;

            uint8_t (*nnz)[48] = h->mb.non_zero_count;
            const x264_left_table_t *left_index_table = SLICE_MBAFF ? h->mb.left_index_table : &left_indices[3];

            if( h->mb.i_neighbour & MB_TOP )
                CP32( &h->mb.cache.non_zero_count[x264_scan8[0] - 8], &nnz[h->mb.i_mb_top_xy][12] );

            if( h->mb.i_neighbour & MB_LEFT )
                int *left = h->mb.i_mb_left_xy;
                h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] = nnz[left[0]][left_index_table->nnz[0]];
                h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] = nnz[left[0]][left_index_table->nnz[1]];
                h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] = nnz[left[1]][left_index_table->nnz[2]];
                h->mb.cache.non_zero_count[x264_scan8[10] - 1] = nnz[left[1]][left_index_table->nnz[3]];

            for( int l = 0; l <= (h->sh.i_type == SLICE_TYPE_B); l++ )
                int16_t (*mv)[2] = h->mb.mv[l];
                int8_t *ref = h->mb.ref[l];

                int i8 = x264_scan8[0] - 8;
                if( h->mb.i_neighbour & MB_TOP )
                    h->mb.cache.ref[l][i8+0] =
                    h->mb.cache.ref[l][i8+1] = ref[top_8x8 + 0];
                    h->mb.cache.ref[l][i8+2] =
                    h->mb.cache.ref[l][i8+3] = ref[top_8x8 + 1];
                    CP128( h->mb.cache.mv[l][i8], mv[top_4x4] );

                i8 = x264_scan8[0] - 1;
                if( h->mb.i_neighbour & MB_LEFT )
                    h->mb.cache.ref[l][i8+0*8] =
                    h->mb.cache.ref[l][i8+1*8] = ref[h->mb.left_b8[0] + 1 + s8x8*left_index_table->ref[0]];
                    h->mb.cache.ref[l][i8+2*8] =
                    h->mb.cache.ref[l][i8+3*8] = ref[h->mb.left_b8[1] + 1 + s8x8*left_index_table->ref[2]];

                    CP32( h->mb.cache.mv[l][i8+0*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[0]] );
                    CP32( h->mb.cache.mv[l][i8+1*8], mv[h->mb.left_b4[0] + 3 + s4x4*left_index_table->mv[1]] );
                    CP32( h->mb.cache.mv[l][i8+2*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[2]] );
                    CP32( h->mb.cache.mv[l][i8+3*8], mv[h->mb.left_b4[1] + 3 + s4x4*left_index_table->mv[3]] );

        h->mb.i_neighbour = new_neighbour;

    if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART && h->sh.i_type == SLICE_TYPE_P )
        /* Handle reference frame duplicates */
        int i8 = x264_scan8[0] - 8;
        h->mb.cache.ref[0][i8+0] =
        h->mb.cache.ref[0][i8+1] = deblock_ref_table(h->mb.cache.ref[0][i8+0]);
        h->mb.cache.ref[0][i8+2] =
        h->mb.cache.ref[0][i8+3] = deblock_ref_table(h->mb.cache.ref[0][i8+2]);

        i8 = x264_scan8[0] - 1;
        h->mb.cache.ref[0][i8+0*8] =
        h->mb.cache.ref[0][i8+1*8] = deblock_ref_table(h->mb.cache.ref[0][i8+0*8]);
        h->mb.cache.ref[0][i8+2*8] =
        h->mb.cache.ref[0][i8+3*8] = deblock_ref_table(h->mb.cache.ref[0][i8+2*8]);

        int ref0 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 0]]);
        int ref1 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 4]]);
        int ref2 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[ 8]]);
        int ref3 = deblock_ref_table(h->mb.cache.ref[0][x264_scan8[12]]);
        uint32_t reftop = pack16to32( (uint8_t)ref0, (uint8_t)ref1 ) * 0x0101;
        uint32_t refbot = pack16to32( (uint8_t)ref2, (uint8_t)ref3 ) * 0x0101;

        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*0] ) = reftop;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*1] ) = reftop;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*2] ) = refbot;
        M32( &h->mb.cache.ref[0][x264_scan8[0]+8*3] ) = refbot;

    /* Munge NNZ for cavlc + 8x8dct */
    if( !h->param.b_cabac && h->pps->b_transform_8x8_mode )
        uint8_t (*nnz)[48] = h->mb.non_zero_count;
        int top = h->mb.i_mb_top_xy;
        int *left = h->mb.i_mb_left_xy;

        if( (h->mb.i_neighbour & MB_TOP) && h->mb.mb_transform_size[top] )
            int i8 = x264_scan8[0] - 8;
            int nnz_top0 = M16( &nnz[top][8] ) | M16( &nnz[top][12] );
            int nnz_top1 = M16( &nnz[top][10] ) | M16( &nnz[top][14] );
            M16( &h->mb.cache.non_zero_count[i8+0] ) = nnz_top0 ? 0x0101 : 0;
            M16( &h->mb.cache.non_zero_count[i8+2] ) = nnz_top1 ? 0x0101 : 0;

        if( h->mb.i_neighbour & MB_LEFT )
            int i8 = x264_scan8[0] - 1;
            if( h->mb.mb_transform_size[left[0]] )
                int nnz_left0 = M16( &nnz[left[0]][2] ) | M16( &nnz[left[0]][6] );
                h->mb.cache.non_zero_count[i8+8*0] = !!nnz_left0;
                h->mb.cache.non_zero_count[i8+8*1] = !!nnz_left0;
            if( h->mb.mb_transform_size[left[1]] )
                int nnz_left1 = M16( &nnz[left[1]][10] ) | M16( &nnz[left[1]][14] );
                h->mb.cache.non_zero_count[i8+8*2] = !!nnz_left1;
                h->mb.cache.non_zero_count[i8+8*3] = !!nnz_left1;

        if( h->mb.mb_transform_size[h->mb.i_mb_xy] )
            int nnz0 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 0]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
            int nnz1 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 4]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[ 6]] );
            int nnz2 = M16( &h->mb.cache.non_zero_count[x264_scan8[ 8]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[10]] );
            int nnz3 = M16( &h->mb.cache.non_zero_count[x264_scan8[12]] ) | M16( &h->mb.cache.non_zero_count[x264_scan8[14]] );
            uint32_t nnztop = pack16to32( !!nnz0, !!nnz1 ) * 0x0101;
            uint32_t nnzbot = pack16to32( !!nnz2, !!nnz3 ) * 0x0101;

            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*0] ) = nnztop;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*1] ) = nnztop;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*2] ) = nnzbot;
            M32( &h->mb.cache.non_zero_count[x264_scan8[0]+8*3] ) = nnzbot;

    h->loopf.deblock_strength( h->mb.cache.non_zero_count, h->mb.cache.ref, h->mb.cache.mv,
                               bs, 4 >> MB_INTERLACED, h->sh.i_type == SLICE_TYPE_B );

        x264_macroblock_deblock_strength_mbaff( h, bs );
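
/* Write one reconstructed macroblock plane back into the frame buffer
 * (re-interleaving chroma for 4:2:0) at the position implied by mb_x/mb_y and
 * the MBAFF field layout. */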
static void ALWAYS_INLINE x264_macroblock_store_pic( x264_t *h, int mb_x, int mb_y, int i, int b_chroma, int b_mbaff )
    int w = b_chroma ? 8 : 16;
    int i_stride = h->fdec->i_stride[i];
    int i_stride2 = i_stride << (b_mbaff && MB_INTERLACED);
    int i_pix_offset = (b_mbaff && MB_INTERLACED)
                     ? 16 * mb_x + w * (mb_y&~1) * i_stride + (mb_y&1) * i_stride
                     : 16 * mb_x + w * mb_y * i_stride;
        h->mc.store_interleave_8x8x2( &h->fdec->plane[1][i_pix_offset], i_stride2, h->mb.pic.p_fdec[1], h->mb.pic.p_fdec[2] );
        h->mc.copy[PIXEL_16x16]( &h->fdec->plane[i][i_pix_offset], i_stride2, h->mb.pic.p_fdec[i], FDEC_STRIDE, 16 );

static void ALWAYS_INLINE x264_macroblock_backup_intra( x264_t *h, int mb_x, int mb_y, int b_mbaff )
    /* In MBAFF we store the last two rows in intra_border_backup[0] and [1].
     * For progressive mbs this is the bottom two rows, and for interlaced the
     * bottom row of each field. We also store samples needed for the next
     * mbpair in intra_border_backup[2]. */
    int backup_dst = !b_mbaff ? 0 : (mb_y&1) ? 1 : MB_INTERLACED ? 0 : 2;
    memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16 ], h->mb.pic.p_fdec[0]+FDEC_STRIDE*15, 16*sizeof(pixel) );
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+FDEC_STRIDE*15, 16*sizeof(pixel) );
        memcpy( &h->intra_border_backup[backup_dst][2][mb_x*16 ], h->mb.pic.p_fdec[2]+FDEC_STRIDE*15, 16*sizeof(pixel) );

        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+FDEC_STRIDE*7, 8*sizeof(pixel) );
        memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+FDEC_STRIDE*7, 8*sizeof(pixel) );

        int backup_src = (MB_INTERLACED ? 7 : 14) * FDEC_STRIDE;
        backup_dst = MB_INTERLACED ? 2 : 0;
        memcpy( &h->intra_border_backup[backup_dst][0][mb_x*16 ], h->mb.pic.p_fdec[0]+backup_src, 16*sizeof(pixel) );
            memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+backup_src, 16*sizeof(pixel) );
            memcpy( &h->intra_border_backup[backup_dst][2][mb_x*16 ], h->mb.pic.p_fdec[2]+backup_src, 16*sizeof(pixel) );

            backup_src = (MB_INTERLACED ? 3 : 6) * FDEC_STRIDE;
            memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16 ], h->mb.pic.p_fdec[1]+backup_src, 8*sizeof(pixel) );
            memcpy( &h->intra_border_backup[backup_dst][1][mb_x*16+8], h->mb.pic.p_fdec[2]+backup_src, 8*sizeof(pixel) );

        /* In progressive we update intra_border_backup in-place, so the topleft neighbor will
         * no longer exist there when load_pic_pointers wants it. Move it within p_fdec instead. */
        h->mb.pic.p_fdec[0][-FDEC_STRIDE-1] = h->mb.pic.p_fdec[0][-FDEC_STRIDE+15];
        h->mb.pic.p_fdec[1][-FDEC_STRIDE-1] = h->mb.pic.p_fdec[1][-FDEC_STRIDE+7 + 8*CHROMA444];
        h->mb.pic.p_fdec[2][-FDEC_STRIDE-1] = h->mb.pic.p_fdec[2][-FDEC_STRIDE+7 + 8*CHROMA444];
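
/* Flush the per-MB cache back into the frame-wide arrays once a macroblock is
 * finished: reconstructed pixels and intra border rows first, then macroblock
 * type, qp, cbp, transform size, non-zero counts and, further below, motion
 * vectors, reference indices and skip information for later macroblocks and the
 * deblocking filter. */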
void x264_macroblock_cache_save( x264_t *h )
{
    const int i_mb_xy = h->mb.i_mb_xy;
    const int i_mb_type = x264_mb_type_fix[h->mb.i_type];
    const int s8x8 = h->mb.i_b8_stride;
    const int s4x4 = h->mb.i_b4_stride;
    const int i_mb_4x4 = h->mb.i_b4_xy;
    const int i_mb_8x8 = h->mb.i_b8_xy;

    /* GCC pessimizes direct stores to heap-allocated arrays due to aliasing. */
    /* By only dereferencing them once, we avoid this issue. */
    int8_t *i4x4 = h->mb.intra4x4_pred_mode[i_mb_xy];
    uint8_t *nnz = h->mb.non_zero_count[i_mb_xy];

    if( SLICE_MBAFF )
    {
        x264_macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 1 );
        x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 0, 1 );
        if( CHROMA444 )
        {
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 0, 1 );
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 2, 0, 1 );
        }
        else
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 1, 1 );
    }
    else
    {
        x264_macroblock_backup_intra( h, h->mb.i_mb_x, h->mb.i_mb_y, 0 );
        x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 0, 0, 0 );
        if( CHROMA444 )
        {
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 0, 0 );
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 2, 0, 0 );
        }
        else
            x264_macroblock_store_pic( h, h->mb.i_mb_x, h->mb.i_mb_y, 1, 1, 0 );
    }

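    /* Prefetch the just-written fdec region; presumably this warms the cache for
     * the deblocking and neighbour loads that read these pixels shortly after. */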
    x264_prefetch_fenc( h, h->fdec, h->mb.i_mb_x, h->mb.i_mb_y );

    h->mb.type[i_mb_xy] = i_mb_type;
    h->mb.slice_table[i_mb_xy] = h->sh.i_first_mb;
    h->mb.partition[i_mb_xy] = IS_INTRA( i_mb_type ) ? D_16x16 : h->mb.i_partition;
    h->mb.i_mb_prev_xy = i_mb_xy;

    /* save intra4x4 */
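    /* Only 7 of the 16 4x4 prediction modes are kept per macroblock: the bottom
     * row and the right column are all that neighbouring macroblocks can ever
     * reference. */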
    if( i_mb_type == I_4x4 )
    {
        CP32( &i4x4[0], &h->mb.cache.intra4x4_pred_mode[x264_scan8[10]] );
        M32( &i4x4[4] ) = pack8to32( h->mb.cache.intra4x4_pred_mode[x264_scan8[5] ],
                                     h->mb.cache.intra4x4_pred_mode[x264_scan8[7] ],
                                     h->mb.cache.intra4x4_pred_mode[x264_scan8[13] ], 0);
    }
    else if( !h->param.b_constrained_intra || IS_INTRA(i_mb_type) )
        M64( i4x4 ) = I_PRED_4x4_DC * 0x0101010101010101ULL;
    else
        M64( i4x4 ) = (uint8_t)(-1) * 0x0101010101010101ULL;

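    /* I_PCM macroblocks carry raw samples: QP is irrelevant (stored as 0, dqp 0),
     * all coded-block-pattern bits are set, and every nnz entry is marked as
     * containing coefficients so neighbouring blocks pick the right entropy
     * coding contexts. */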
    if( i_mb_type == I_PCM )
    {
        h->mb.qp[i_mb_xy] = 0;
        h->mb.i_last_dqp = 0;
        h->mb.i_cbp_chroma = CHROMA444 ? 0 : 2;
        h->mb.i_cbp_luma = 0xf;
        h->mb.cbp[i_mb_xy] = (h->mb.i_cbp_chroma << 4) | h->mb.i_cbp_luma | 0x700;
        h->mb.b_transform_8x8 = 0;
        for( int i = 0; i < 48; i++ )
            h->mb.cache.non_zero_count[x264_scan8[i]] = h->param.b_cabac ? 1 : 16;
    }
    else
    {
        /* An MB with no coded residual (and not I_16x16) transmits no dqp,
         * so its effective QP reverts to the running last_qp. */
        if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 )
            h->mb.i_qp = h->mb.i_last_qp;
        h->mb.qp[i_mb_xy] = h->mb.i_qp;
        h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp;
        h->mb.i_last_qp = h->mb.i_qp;
    }

    /* save non zero count */
    CP32( &nnz[ 0+0*4], &h->mb.cache.non_zero_count[x264_scan8[ 0]] );
    CP32( &nnz[ 0+1*4], &h->mb.cache.non_zero_count[x264_scan8[ 2]] );
    CP32( &nnz[ 0+2*4], &h->mb.cache.non_zero_count[x264_scan8[ 8]] );
    CP32( &nnz[ 0+3*4], &h->mb.cache.non_zero_count[x264_scan8[10]] );
    CP32( &nnz[16+0*4], &h->mb.cache.non_zero_count[x264_scan8[16+0]] );
    CP32( &nnz[16+1*4], &h->mb.cache.non_zero_count[x264_scan8[16+2]] );
    CP32( &nnz[32+0*4], &h->mb.cache.non_zero_count[x264_scan8[32+0]] );
    CP32( &nnz[32+1*4], &h->mb.cache.non_zero_count[x264_scan8[32+2]] );
    if( CHROMA444 )
    {
        CP32( &nnz[16+2*4], &h->mb.cache.non_zero_count[x264_scan8[16+ 8]] );
        CP32( &nnz[16+3*4], &h->mb.cache.non_zero_count[x264_scan8[16+10]] );
        CP32( &nnz[32+2*4], &h->mb.cache.non_zero_count[x264_scan8[32+ 8]] );
        CP32( &nnz[32+3*4], &h->mb.cache.non_zero_count[x264_scan8[32+10]] );
    }

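    /* transform_size_8x8_flag is only meaningful when luma coefficients exist (or
     * the MB is I_8x8); normalise it to 0 otherwise so the stored neighbour state
     * stays consistent. */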
    if( h->mb.i_cbp_luma == 0 && h->mb.i_type != I_8x8 )
        h->mb.b_transform_8x8 = 0;
    h->mb.mb_transform_size[i_mb_xy] = h->mb.b_transform_8x8;

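    /* Write motion vectors and reference indices out to the frame-wide planes
     * (refs at 8x8 granularity, MVs at 4x4). Later macroblocks read them for MV
     * prediction, and later frames for temporal/direct prediction. Intra
     * macroblocks store ref = -1 and zero MVs. */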
    if( h->sh.i_type != SLICE_TYPE_I )
    {
        int16_t (*mv0)[2] = &h->mb.mv[0][i_mb_4x4];
        int16_t (*mv1)[2] = &h->mb.mv[1][i_mb_4x4];
        int8_t *ref0 = &h->mb.ref[0][i_mb_8x8];
        int8_t *ref1 = &h->mb.ref[1][i_mb_8x8];
        if( !IS_INTRA( i_mb_type ) )
        {
            ref0[0+0*s8x8] = h->mb.cache.ref[0][x264_scan8[0]];
            ref0[1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
            ref0[0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
            ref0[1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
            CP128( &mv0[0*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*0] );
            CP128( &mv0[1*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*1] );
            CP128( &mv0[2*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*2] );
            CP128( &mv0[3*s4x4], h->mb.cache.mv[0][x264_scan8[0]+8*3] );
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                ref1[0+0*s8x8] = h->mb.cache.ref[1][x264_scan8[0]];
                ref1[1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
                ref1[0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
                ref1[1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
                CP128( &mv1[0*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*0] );
                CP128( &mv1[1*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*1] );
                CP128( &mv1[2*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*2] );
                CP128( &mv1[3*s4x4], h->mb.cache.mv[1][x264_scan8[0]+8*3] );
            }
        }
        else
        {
            M16( &ref0[0*s8x8] ) = (uint8_t)(-1) * 0x0101;
            M16( &ref0[1*s8x8] ) = (uint8_t)(-1) * 0x0101;
            M128( &mv0[0*s4x4] ) = M128_ZERO;
            M128( &mv0[1*s4x4] ) = M128_ZERO;
            M128( &mv0[2*s4x4] ) = M128_ZERO;
            M128( &mv0[3*s4x4] ) = M128_ZERO;
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                M16( &ref1[0*s8x8] ) = (uint8_t)(-1) * 0x0101;
                M16( &ref1[1*s8x8] ) = (uint8_t)(-1) * 0x0101;
                M128( &mv1[0*s4x4] ) = M128_ZERO;
                M128( &mv1[1*s4x4] ) = M128_ZERO;
                M128( &mv1[2*s4x4] ) = M128_ZERO;
                M128( &mv1[3*s4x4] ) = M128_ZERO;
            }
        }
    }

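    /* CABAC needs extra per-MB state for context selection in later macroblocks:
     * the chroma intra prediction mode, the motion vector deltas (kept only for
     * MB types that actually code MVs), and, in B slices, the per-8x8 bitmask of
     * DIRECT sub-partitions. */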
    if( h->param.b_cabac )
    {
        uint8_t (*mvd0)[2] = h->mb.mvd[0][i_mb_xy];
        uint8_t (*mvd1)[2] = h->mb.mvd[1][i_mb_xy];
        if( IS_INTRA(i_mb_type) && i_mb_type != I_PCM )
            h->mb.chroma_pred_mode[i_mb_xy] = x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ];
        else
            h->mb.chroma_pred_mode[i_mb_xy] = I_PRED_CHROMA_DC;

        if( (0x3FF30 >> i_mb_type) & 1 ) /* !INTRA && !SKIP && !DIRECT */
        {
            CP64( mvd0[0], h->mb.cache.mvd[0][x264_scan8[10]] );
            CP16( mvd0[4], h->mb.cache.mvd[0][x264_scan8[5 ]] );
            CP16( mvd0[5], h->mb.cache.mvd[0][x264_scan8[7 ]] );
            CP16( mvd0[6], h->mb.cache.mvd[0][x264_scan8[13]] );
            if( h->sh.i_type == SLICE_TYPE_B )
            {
                CP64( mvd1[0], h->mb.cache.mvd[1][x264_scan8[10]] );
                CP16( mvd1[4], h->mb.cache.mvd[1][x264_scan8[5 ]] );
                CP16( mvd1[5], h->mb.cache.mvd[1][x264_scan8[7 ]] );
                CP16( mvd1[6], h->mb.cache.mvd[1][x264_scan8[13]] );
            }
        }
        else
        {
            M128( mvd0[0] ) = M128_ZERO;
            if( h->sh.i_type == SLICE_TYPE_B )
                M128( mvd1[0] ) = M128_ZERO;
        }

        if( h->sh.i_type == SLICE_TYPE_B )
        {
            if( i_mb_type == B_SKIP || i_mb_type == B_DIRECT )
                h->mb.skipbp[i_mb_xy] = 0xf;
            else if( i_mb_type == B_8x8 )
            {
                int skipbp = ( h->mb.i_sub_partition[0] == D_DIRECT_8x8 ) << 0;
                skipbp |= ( h->mb.i_sub_partition[1] == D_DIRECT_8x8 ) << 1;
                skipbp |= ( h->mb.i_sub_partition[2] == D_DIRECT_8x8 ) << 2;
                skipbp |= ( h->mb.i_sub_partition[3] == D_DIRECT_8x8 ) << 3;
                h->mb.skipbp[i_mb_xy] = skipbp;
            }
            else
                h->mb.skipbp[i_mb_xy] = 0;
        }
    }
}

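/* Precompute, for every (L0,L1) reference pair, the temporal scaling factor used
 * for direct/temporal prediction and the implicit weighted-biprediction weight.
 * Per the code below: tx = (16384 + |td|/2) / td and
 * dist_scale_factor = clip3( (tb*tx + 32) >> 6, -1024, 1023 ), with tb and td the
 * clipped POC distances cur_poc-poc0 and poc1-poc0.
 * Illustrative numbers (not from the source): poc0=0, poc1=4, cur_poc=2 gives
 * td=4, tb=2, tx=4096, dist_scale_factor=128, and an implicit weight of
 * 64 - (128>>2) = 32, i.e. equal weighting for a frame midway between its two
 * references. */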
void x264_macroblock_bipred_init( x264_t *h )
{
    for( int mbfield = 0; mbfield <= SLICE_MBAFF; mbfield++ )
        for( int field = 0; field <= SLICE_MBAFF; field++ )
            for( int i_ref0 = 0; i_ref0 < (h->i_ref[0]<<mbfield); i_ref0++ )
            {
                x264_frame_t *l0 = h->fref[0][i_ref0>>mbfield];
                int poc0 = l0->i_poc + mbfield*l0->i_delta_poc[field^(i_ref0&1)];
                for( int i_ref1 = 0; i_ref1 < (h->i_ref[1]<<mbfield); i_ref1++ )
                {
                    int dist_scale_factor;
                    x264_frame_t *l1 = h->fref[1][i_ref1>>mbfield];
                    int cur_poc = h->fdec->i_poc + mbfield*h->fdec->i_delta_poc[field];
                    int poc1 = l1->i_poc + mbfield*l1->i_delta_poc[field^(i_ref1&1)];
                    int td = x264_clip3( poc1 - poc0, -128, 127 );
                    if( td == 0 /* || pic0 is a long-term ref */ )
                        dist_scale_factor = 256;
                    else
                    {
                        int tb = x264_clip3( cur_poc - poc0, -128, 127 );
                        int tx = (16384 + (abs(td) >> 1)) / td;
                        dist_scale_factor = x264_clip3( (tb * tx + 32) >> 6, -1024, 1023 );
                    }

                    h->mb.dist_scale_factor_buf[mbfield][field][i_ref0][i_ref1] = dist_scale_factor;

                    dist_scale_factor >>= 2;
                    if( h->param.analyse.b_weighted_bipred
                          && dist_scale_factor >= -64
                          && dist_scale_factor <= 128 )
                    {
                        h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 64 - dist_scale_factor;
                        // ssse3 implementation of biweight doesn't support the extrema.
                        // if we ever generate them, we'll have to drop that optimization.
                        assert( dist_scale_factor >= -63 && dist_scale_factor <= 127 );
                    }
                    else
                        h->mb.bipred_weight_buf[mbfield][field][i_ref0][i_ref1] = 32;
                }
            }
}