1 /*****************************************************************************
2 * macroblock.c: h264 encoder library
3 *****************************************************************************
4 * Copyright (C) 2003-2008 x264 project
6 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
7 * Loren Merritt <lorenm@u.washington.edu>
8 * Fiona Glaser <fiona@x264.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 *****************************************************************************/
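/* Common macroblock routines: motion vector prediction (P-skip, B-direct
 * spatial/temporal), motion compensation, per-frame cache allocation, and the
 * per-macroblock cache load/save that surrounds the encoding of each MB. */

#include "common.h" /* core types and cache macros (x264_t, M32/CP32, x264_scan8) */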
26 #include "encoder/me.h"
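/* Predict the MV of partition idx (i_width 4x4-blocks wide) in list i_list from
 * its spatial neighbours A (left), B (above) and C (above-right, replaced by the
 * above-left block when C is unavailable or disallowed for this block position).
 * Per the H.264 median predictor: if exactly one neighbour uses the target
 * reference its MV is copied; if only A is available at all, A is used;
 * otherwise the component-wise median of A, B and C is taken. D_16x8 and D_8x16
 * partitions get the spec's dedicated shortcuts. */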
28 void x264_mb_predict_mv( x264_t *h, int i_list, int idx, int i_width, int16_t mvp[2] )
30 const int i8 = x264_scan8[idx];
31 const int i_ref = h->mb.cache.ref[i_list][i8];
32 int i_refa = h->mb.cache.ref[i_list][i8 - 1];
33 int16_t *mv_a = h->mb.cache.mv[i_list][i8 - 1];
34 int i_refb = h->mb.cache.ref[i_list][i8 - 8];
35 int16_t *mv_b = h->mb.cache.mv[i_list][i8 - 8];
36 int i_refc = h->mb.cache.ref[i_list][i8 - 8 + i_width];
37 int16_t *mv_c = h->mb.cache.mv[i_list][i8 - 8 + i_width];
39 if( (idx&3) >= 2 + (i_width&1) || i_refc == -2 )
41 i_refc = h->mb.cache.ref[i_list][i8 - 8 - 1];
42 mv_c = h->mb.cache.mv[i_list][i8 - 8 - 1];
45 if( h->mb.i_partition == D_16x8 )
64 else if( h->mb.i_partition == D_8x16 )
84 int i_count = (i_refa == i_ref) + (i_refb == i_ref) + (i_refc == i_ref);
89 x264_median_mv( mvp, mv_a, mv_b, mv_c );
91 else if( i_count == 1 )
95 else if( i_refb == i_ref )
100 else if( i_refb == -2 && i_refc == -2 && i_refa != -2 )
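/* Same predictor, specialized for a single 16x16 partition (neighbour C is the
 * block above-right of the whole MB; no sub-partition special cases). */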
106 void x264_mb_predict_mv_16x16( x264_t *h, int i_list, int i_ref, int16_t mvp[2] )
108 int i_refa = h->mb.cache.ref[i_list][X264_SCAN8_0 - 1];
109 int16_t *mv_a = h->mb.cache.mv[i_list][X264_SCAN8_0 - 1];
110 int i_refb = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8];
111 int16_t *mv_b = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8];
112 int i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 + 4];
113 int16_t *mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 + 4];
116 i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 - 1];
117 mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 - 1];
120 int i_count = (i_refa == i_ref) + (i_refb == i_ref) + (i_refc == i_ref);
125 x264_median_mv( mvp, mv_a, mv_b, mv_c );
127 else if( i_count == 1 )
129 if( i_refa == i_ref )
131 else if( i_refb == i_ref )
136 else if( i_refb == -2 && i_refc == -2 && i_refa != -2 )
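/* P-skip MV: zero if the left or top neighbour is unavailable, or if either of
 * them uses ref 0 with a zero MV; otherwise the normal 16x16 prediction for
 * ref 0, as specified for skipped MBs in P slices. */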
143 void x264_mb_predict_mv_pskip( x264_t *h, int16_t mv[2] )
145 int i_refa = h->mb.cache.ref[0][X264_SCAN8_0 - 1];
146 int i_refb = h->mb.cache.ref[0][X264_SCAN8_0 - 8];
147 int16_t *mv_a = h->mb.cache.mv[0][X264_SCAN8_0 - 1];
148 int16_t *mv_b = h->mb.cache.mv[0][X264_SCAN8_0 - 8];
150 if( i_refa == -2 || i_refb == -2 ||
151 !( i_refa | M32( mv_a ) ) ||
152 !( i_refb | M32( mv_b ) ) )
157 x264_mb_predict_mv_16x16( h, 0, 0, mv );
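/* Temporal direct: reuse the partitioning of the colocated MB in the first
 * list1 reference, map its list0 ref into the current list0, and scale its MV:
 * mv_l0 = (dist_scale_factor * mv_col + 128) >> 8, mv_l1 = mv_l0 - mv_col.
 * Returns 0 if the colocated ref has no list0 equivalent or if the scaled MV
 * would exceed the vertical range allowed under frame-threading. */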
160 static int x264_mb_predict_mv_direct16x16_temporal( x264_t *h )
162 int i_mb_4x4 = 16 * h->mb.i_mb_stride * h->mb.i_mb_y + 4 * h->mb.i_mb_x;
163 int i_mb_8x8 = 4 * h->mb.i_mb_stride * h->mb.i_mb_y + 2 * h->mb.i_mb_x;
164 const int type_col = h->fref1[0]->mb_type[h->mb.i_mb_xy];
165 const int partition_col = h->fref1[0]->mb_partition[h->mb.i_mb_xy];
167 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, 0 );
169 h->mb.i_partition = partition_col;
171 if( IS_INTRA( type_col ) )
173 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
174 x264_macroblock_cache_mv( h, 0, 0, 4, 4, 0, 0 );
175 x264_macroblock_cache_mv( h, 0, 0, 4, 4, 1, 0 );
179 /* Don't do any checks other than the ones we have to, based
180 * on the size of the colocated partitions.
181 * Depends on the enum order: D_8x8, D_16x8, D_8x16, D_16x16 */
182 int max_i8 = (D_16x16 - partition_col) + 1;
183 int step = (partition_col == D_16x8) + 1;
184 int width = 4 >> ((D_16x16 - partition_col)&1);
185 int height = 4 >> ((D_16x16 - partition_col)>>1);
187 for( int i8 = 0; i8 < max_i8; i8 += step )
191 int i_part_8x8 = i_mb_8x8 + x8 + y8 * h->mb.i_b8_stride;
192 int i_ref1_ref = h->fref1[0]->ref[0][i_part_8x8];
193 int i_ref = (map_col_to_list0(i_ref1_ref>>h->sh.b_mbaff) << h->sh.b_mbaff) + (i_ref1_ref&h->sh.b_mbaff);
197 int dist_scale_factor = h->mb.dist_scale_factor[i_ref][0];
198 int16_t *mv_col = h->fref1[0]->mv[0][i_mb_4x4 + 3*x8 + 3*y8 * h->mb.i_b4_stride];
199 int l0x = ( dist_scale_factor * mv_col[0] + 128 ) >> 8;
200 int l0y = ( dist_scale_factor * mv_col[1] + 128 ) >> 8;
201 if( h->param.i_threads > 1 && (l0y > h->mb.mv_max_spel[1] || l0y-mv_col[1] > h->mb.mv_max_spel[1]) )
203 x264_macroblock_cache_ref( h, 2*x8, 2*y8, width, height, 0, i_ref );
204 x264_macroblock_cache_mv( h, 2*x8, 2*y8, width, height, 0, pack16to32_mask(l0x, l0y) );
205 x264_macroblock_cache_mv( h, 2*x8, 2*y8, width, height, 1, pack16to32_mask(l0x-mv_col[0], l0y-mv_col[1]) );
209 /* the collocated ref isn't in the current list0 */
210 /* FIXME: we might still be able to use direct_8x8 on some partitions */
211 /* FIXME: with B-pyramid + extensive ref list reordering
212 * (not currently used), we would also have to check
213 * l1mv1 like in spatial mode */
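/* Spatial direct: for each list take the smallest non-negative neighbouring ref
 * index and the median MV of the neighbours; if neither list yields a usable
 * ref, fall back to ref 0 with zero MVs. Then, per the spec's colocated-zero
 * rule, 8x8 blocks whose colocated block is (nearly) static with ref 0 have
 * their direct MVs forced to zero in every list that uses ref 0. */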
221 static int x264_mb_predict_mv_direct16x16_spatial( x264_t *h )
224 ALIGNED_ARRAY_8( int16_t, mv,[2],[2] );
225 const int8_t *l1ref0 = &h->fref1[0]->ref[0][h->mb.i_b8_xy];
226 const int8_t *l1ref1 = &h->fref1[0]->ref[1][h->mb.i_b8_xy];
227 const int16_t (*l1mv[2])[2] = { (const int16_t (*)[2]) &h->fref1[0]->mv[0][h->mb.i_b4_xy],
228 (const int16_t (*)[2]) &h->fref1[0]->mv[1][h->mb.i_b4_xy] };
229 const int type_col = h->fref1[0]->mb_type[h->mb.i_mb_xy];
230 const int partition_col = h->fref1[0]->mb_partition[h->mb.i_mb_xy];
232 h->mb.i_partition = partition_col;
234 for( int i_list = 0; i_list < 2; i_list++ )
236 int i_refa = h->mb.cache.ref[i_list][X264_SCAN8_0 - 1];
237 int16_t *mv_a = h->mb.cache.mv[i_list][X264_SCAN8_0 - 1];
238 int i_refb = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8];
239 int16_t *mv_b = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8];
240 int i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 + 4];
241 int16_t *mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 + 4];
244 i_refc = h->mb.cache.ref[i_list][X264_SCAN8_0 - 8 - 1];
245 mv_c = h->mb.cache.mv[i_list][X264_SCAN8_0 - 8 - 1];
248 int i_ref = X264_MIN3( (unsigned)i_refa, (unsigned)i_refb, (unsigned)i_refc );
252 M32( mv[i_list] ) = 0;
256 /* Same as x264_mb_predict_mv_16x16, but simplified to eliminate cases
257 * not relevant to spatial direct. */
258 int i_count = (i_refa == i_ref) + (i_refb == i_ref) + (i_refc == i_ref);
261 x264_median_mv( mv[i_list], mv_a, mv_b, mv_c );
264 if( i_refa == i_ref )
265 CP32( mv[i_list], mv_a );
266 else if( i_refb == i_ref )
267 CP32( mv[i_list], mv_b );
269 CP32( mv[i_list], mv_c );
273 x264_macroblock_cache_ref( h, 0, 0, 4, 4, i_list, i_ref );
274 x264_macroblock_cache_mv_ptr( h, 0, 0, 4, 4, i_list, mv[i_list] );
278 if( (M16( ref ) & 0x8080) == 0x8080 ) /* if( ref[0] < 0 && ref[1] < 0 ) */
280 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 0, 0 );
281 x264_macroblock_cache_ref( h, 0, 0, 4, 4, 1, 0 );
285 if( h->param.i_threads > 1
286 && ( mv[0][1] > h->mb.mv_max_spel[1]
287 || mv[1][1] > h->mb.mv_max_spel[1] ) )
290 fprintf(stderr, "direct_spatial: (%d,%d) (%d,%d) > %d \n",
291 mv[0][0], mv[0][1], mv[1][0], mv[1][1],
292 h->mb.mv_max_spel[1]);
297 if( !M64( mv ) || IS_INTRA( type_col ) || (ref[0]&&ref[1]) )
300 /* Don't do any checks other than the ones we have to, based
301 * on the size of the colocated partitions.
302 * Depends on the enum order: D_8x8, D_16x8, D_8x16, D_16x16 */
303 int max_i8 = (D_16x16 - partition_col) + 1;
304 int step = (partition_col == D_16x8) + 1;
305 int width = 4 >> ((D_16x16 - partition_col)&1);
306 int height = 4 >> ((D_16x16 - partition_col)>>1);
309 for( int i8 = 0; i8 < max_i8; i8 += step )
312 const int y8 = i8>>1;
313 const int o8 = x8 + y8 * h->mb.i_b8_stride;
314 const int o4 = 3*(x8 + y8 * h->mb.i_b4_stride);
316 if( l1ref0[o8] == 0 )
318 else if( l1ref0[o8] < 0 && l1ref1[o8] == 0 )
323 if( abs( l1mv[idx][o4][0] ) <= 1 && abs( l1mv[idx][o4][1] ) <= 1 )
325 if( ref[0] == 0 ) x264_macroblock_cache_mv( h, 2*x8, 2*y8, width, height, 0, 0 );
326 if( ref[1] == 0 ) x264_macroblock_cache_mv( h, 2*x8, 2*y8, width, height, 1, 0 );
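/* Direct MV dispatcher. If b_changed is non-NULL, also report whether the
 * direct refs/MVs differ from those cached on the previous call, so the caller
 * can avoid re-scoring direct modes that did not change. */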
333 int x264_mb_predict_mv_direct16x16( x264_t *h, int *b_changed )
336 if( h->param.analyse.i_direct_mv_pred == X264_DIRECT_PRED_NONE )
338 else if( h->sh.b_direct_spatial_mv_pred )
339 b_available = x264_mb_predict_mv_direct16x16_spatial( h );
341 b_available = x264_mb_predict_mv_direct16x16_temporal( h );
343 if( b_changed != NULL && b_available )
347 changed = M32( h->mb.cache.direct_mv[0][0] ) ^ M32( h->mb.cache.mv[0][x264_scan8[0]] );
348 changed |= M32( h->mb.cache.direct_mv[1][0] ) ^ M32( h->mb.cache.mv[1][x264_scan8[0]] );
349 changed |= h->mb.cache.direct_ref[0][0] ^ h->mb.cache.ref[0][x264_scan8[0]];
350 changed |= h->mb.cache.direct_ref[1][0] ^ h->mb.cache.ref[1][x264_scan8[0]];
351 if( !changed && h->mb.i_partition != D_16x16 )
353 changed |= M32( h->mb.cache.direct_mv[0][3] ) ^ M32( h->mb.cache.mv[0][x264_scan8[12]] );
354 changed |= M32( h->mb.cache.direct_mv[1][3] ) ^ M32( h->mb.cache.mv[1][x264_scan8[12]] );
355 changed |= h->mb.cache.direct_ref[0][3] ^ h->mb.cache.ref[0][x264_scan8[12]];
356 changed |= h->mb.cache.direct_ref[1][3] ^ h->mb.cache.ref[1][x264_scan8[12]];
358 if( !changed && h->mb.i_partition == D_8x8 )
360 changed |= M32( h->mb.cache.direct_mv[0][1] ) ^ M32( h->mb.cache.mv[0][x264_scan8[4]] );
361 changed |= M32( h->mb.cache.direct_mv[1][1] ) ^ M32( h->mb.cache.mv[1][x264_scan8[4]] );
362 changed |= M32( h->mb.cache.direct_mv[0][2] ) ^ M32( h->mb.cache.mv[0][x264_scan8[8]] );
363 changed |= M32( h->mb.cache.direct_mv[1][2] ) ^ M32( h->mb.cache.mv[1][x264_scan8[8]] );
364 changed |= h->mb.cache.direct_ref[0][1] ^ h->mb.cache.ref[0][x264_scan8[4]];
365 changed |= h->mb.cache.direct_ref[1][1] ^ h->mb.cache.ref[1][x264_scan8[4]];
366 changed |= h->mb.cache.direct_ref[0][2] ^ h->mb.cache.ref[0][x264_scan8[8]];
367 changed |= h->mb.cache.direct_ref[1][2] ^ h->mb.cache.ref[1][x264_scan8[8]];
369 *b_changed = changed;
376 for( int l = 0; l < 2; l++ )
378 CP32( h->mb.cache.direct_mv[l][0], h->mb.cache.mv[l][x264_scan8[ 0]] );
379 CP32( h->mb.cache.direct_mv[l][1], h->mb.cache.mv[l][x264_scan8[ 4]] );
380 CP32( h->mb.cache.direct_mv[l][2], h->mb.cache.mv[l][x264_scan8[ 8]] );
381 CP32( h->mb.cache.direct_mv[l][3], h->mb.cache.mv[l][x264_scan8[12]] );
382 h->mb.cache.direct_ref[l][0] = h->mb.cache.ref[l][x264_scan8[ 0]];
383 h->mb.cache.direct_ref[l][1] = h->mb.cache.ref[l][x264_scan8[ 4]];
384 h->mb.cache.direct_ref[l][2] = h->mb.cache.ref[l][x264_scan8[ 8]];
385 h->mb.cache.direct_ref[l][3] = h->mb.cache.ref[l][x264_scan8[12]];
386 h->mb.cache.direct_partition = h->mb.i_partition;
392 /* This just improves encoder performance, it's not part of the spec */
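/* Gathers additional MV candidates into mvc[] to seed motion search: the direct
 * MV (B slices), the lookahead's lowres MV, the neighbouring MBs' MVs for the
 * same reference, and temporally scaled MVs from the previous frame. */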
393 void x264_mb_predict_mv_ref16x16( x264_t *h, int i_list, int i_ref, int16_t mvc[9][2], int *i_mvc )
395 int16_t (*mvr)[2] = h->mb.mvr[i_list][i_ref];
398 #define SET_MVP(mvp)\
400 CP32( mvc[i], mvp ); \
405 if( h->sh.i_type == SLICE_TYPE_B
406 && h->mb.cache.ref[i_list][x264_scan8[12]] == i_ref )
408 SET_MVP( h->mb.cache.mv[i_list][x264_scan8[12]] );
411 if( i_ref == 0 && h->frames.b_have_lowres )
413 int16_t (*lowres_mv)[2] = i_list ? h->fenc->lowres_mvs[1][h->fref1[0]->i_frame-h->fenc->i_frame-1]
414 : h->fenc->lowres_mvs[0][h->fenc->i_frame-h->fref0[0]->i_frame-1];
415 if( lowres_mv[0][0] != 0x7fff )
417 M32( mvc[i] ) = (M32( lowres_mv[h->mb.i_mb_xy] )*2)&0xfffeffff;
422 /* spatial predictors */
423 if( h->mb.i_neighbour & MB_LEFT )
425 int i_mb_l = h->mb.i_mb_xy - 1;
426 SET_MVP( mvr[i_mb_l] );
428 if( h->mb.i_neighbour & MB_TOP )
430 int i_mb_t = h->mb.i_mb_top_xy;
431 SET_MVP( mvr[i_mb_t] );
433 if( h->mb.i_neighbour & MB_TOPLEFT )
434 SET_MVP( mvr[i_mb_t-1] );
435 if( h->mb.i_mb_x < h->mb.i_mb_stride - 1 )
436 SET_MVP( mvr[i_mb_t+1] );
440 /* temporal predictors */
441 if( h->fref0[0]->i_ref[0] > 0 )
443 x264_frame_t *l0 = h->fref0[0];
444 x264_frame_t **fref = i_list ? h->fref1 : h->fref0;
445 int field = h->mb.i_mb_y&1;
446 int curpoc = h->fdec->i_poc + field*h->sh.i_delta_poc_bottom;
447 int refpoc = fref[i_ref>>h->sh.b_mbaff]->i_poc;
448 if( h->sh.b_mbaff && field^(i_ref&1) )
449 refpoc += h->sh.i_delta_poc_bottom;
451 #define SET_TMVP(dx, dy) { \
452 int i_b4 = h->mb.i_b4_xy + dx*4 + dy*4*h->mb.i_b4_stride; \
453 int i_b8 = h->mb.i_b8_xy + dx*2 + dy*2*h->mb.i_b8_stride; \
454 int ref_col = l0->ref[0][i_b8]; \
457 int scale = (curpoc - refpoc) * l0->inv_ref_poc[h->mb.b_interlaced&field][ref_col];\
458 mvc[i][0] = (l0->mv[0][i_b4][0]*scale + 128) >> 8;\
459 mvc[i][1] = (l0->mv[0][i_b4][1]*scale + 128) >> 8;\
465 if( h->mb.i_mb_x < h->sps->i_mb_width-1 )
467 if( h->mb.i_mb_y < h->sps->i_mb_height-1 )
475 /* Set up a lookup table for delta pocs to reduce an IDIV to an IMUL */
476 static void setup_inverse_delta_pocs( x264_t *h )
478 for( int field = 0; field <= h->sh.b_mbaff; field++ )
480 int curpoc = h->fdec->i_poc + field*h->sh.i_delta_poc_bottom;
481 for( int i = 0; i < (h->i_ref0<<h->sh.b_mbaff); i++ )
483 int refpoc = h->fref0[i>>h->sh.b_mbaff]->i_poc;
484 if( h->sh.b_mbaff && field^(i&1) )
485 refpoc += h->sh.i_delta_poc_bottom;
486 int delta = curpoc - refpoc;
488 h->fdec->inv_ref_poc[field][i] = (256 + delta/2) / delta;
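/* e.g. delta == 2 gives inv_ref_poc == 128, so SET_TMVP's
 * (mv * poc_diff * inv_ref_poc + 128) >> 8 approximates mv * poc_diff / delta
 * using a multiply instead of a divide. */

/* Motion compensation helpers: each interpolates a (4*width)x(4*height) luma
 * block (and the corresponding chroma at half resolution) at 4x4-block
 * coordinates (x,y), from list0, list1, or both lists averaged with the
 * precomputed bipred weight. */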
493 static NOINLINE void x264_mb_mc_0xywh( x264_t *h, int x, int y, int width, int height )
495 int i8 = x264_scan8[0]+x+8*y;
496 int i_ref = h->mb.cache.ref[0][i8];
497 int mvx = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
498 int mvy = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
500 h->mc.mc_luma( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
501 h->mb.pic.p_fref[0][i_ref], h->mb.pic.i_stride[0],
502 mvx, mvy, 4*width, 4*height, &h->sh.weight[i_ref][0] );
504 // chroma is offset if MCing from a field of opposite parity
505 if( h->mb.b_interlaced & i_ref )
506 mvy += (h->mb.i_mb_y & 1)*4 - 2;
508 h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
509 h->mb.pic.p_fref[0][i_ref][4], h->mb.pic.i_stride[1],
510 mvx, mvy, 2*width, 2*height );
512 if( h->sh.weight[i_ref][1].weightfn )
513 h->sh.weight[i_ref][1].weightfn[width>>1]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
514 &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
515 &h->sh.weight[i_ref][1], height*2 );
517 h->mc.mc_chroma( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
518 h->mb.pic.p_fref[0][i_ref][5], h->mb.pic.i_stride[2],
519 mvx, mvy, 2*width, 2*height );
521 if( h->sh.weight[i_ref][2].weightfn )
522 h->sh.weight[i_ref][2].weightfn[width>>1]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
523 &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
524 &h->sh.weight[i_ref][2],height*2 );
527 static NOINLINE void x264_mb_mc_1xywh( x264_t *h, int x, int y, int width, int height )
529 int i8 = x264_scan8[0]+x+8*y;
530 int i_ref = h->mb.cache.ref[1][i8];
531 int mvx = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
532 int mvy = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
534 h->mc.mc_luma( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
535 h->mb.pic.p_fref[1][i_ref], h->mb.pic.i_stride[0],
536 mvx, mvy, 4*width, 4*height, weight_none );
538 if( h->mb.b_interlaced & i_ref )
539 mvy += (h->mb.i_mb_y & 1)*4 - 2;
541 h->mc.mc_chroma( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
542 h->mb.pic.p_fref[1][i_ref][4], h->mb.pic.i_stride[1],
543 mvx, mvy, 2*width, 2*height );
545 h->mc.mc_chroma( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE,
546 h->mb.pic.p_fref[1][i_ref][5], h->mb.pic.i_stride[2],
547 mvx, mvy, 2*width, 2*height );
550 static NOINLINE void x264_mb_mc_01xywh( x264_t *h, int x, int y, int width, int height )
552 int i8 = x264_scan8[0]+x+8*y;
553 int i_ref0 = h->mb.cache.ref[0][i8];
554 int i_ref1 = h->mb.cache.ref[1][i8];
555 int weight = h->mb.bipred_weight[i_ref0][i_ref1];
556 int mvx0 = x264_clip3( h->mb.cache.mv[0][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
557 int mvx1 = x264_clip3( h->mb.cache.mv[1][i8][0], h->mb.mv_min[0], h->mb.mv_max[0] ) + 4*4*x;
558 int mvy0 = x264_clip3( h->mb.cache.mv[0][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
559 int mvy1 = x264_clip3( h->mb.cache.mv[1][i8][1], h->mb.mv_min[1], h->mb.mv_max[1] ) + 4*4*y;
560 int i_mode = x264_size2pixel[height][width];
561 int i_stride0 = 16, i_stride1 = 16;
562 ALIGNED_ARRAY_16( uint8_t, tmp0,[16*16] );
563 ALIGNED_ARRAY_16( uint8_t, tmp1,[16*16] );
564 uint8_t *src0, *src1;
566 src0 = h->mc.get_ref( tmp0, &i_stride0, h->mb.pic.p_fref[0][i_ref0], h->mb.pic.i_stride[0],
567 mvx0, mvy0, 4*width, 4*height, weight_none );
568 src1 = h->mc.get_ref( tmp1, &i_stride1, h->mb.pic.p_fref[1][i_ref1], h->mb.pic.i_stride[0],
569 mvx1, mvy1, 4*width, 4*height, weight_none );
570 h->mc.avg[i_mode]( &h->mb.pic.p_fdec[0][4*y*FDEC_STRIDE+4*x], FDEC_STRIDE,
571 src0, i_stride0, src1, i_stride1, weight );
573 if( h->mb.b_interlaced & i_ref0 )
574 mvy0 += (h->mb.i_mb_y & 1)*4 - 2;
575 if( h->mb.b_interlaced & i_ref1 )
576 mvy1 += (h->mb.i_mb_y & 1)*4 - 2;
578 h->mc.mc_chroma( tmp0, 16, h->mb.pic.p_fref[0][i_ref0][4], h->mb.pic.i_stride[1],
579 mvx0, mvy0, 2*width, 2*height );
580 h->mc.mc_chroma( tmp1, 16, h->mb.pic.p_fref[1][i_ref1][4], h->mb.pic.i_stride[1],
581 mvx1, mvy1, 2*width, 2*height );
582 h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[1][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp0, 16, tmp1, 16, weight );
583 h->mc.mc_chroma( tmp0, 16, h->mb.pic.p_fref[0][i_ref0][5], h->mb.pic.i_stride[2],
584 mvx0, mvy0, 2*width, 2*height );
585 h->mc.mc_chroma( tmp1, 16, h->mb.pic.p_fref[1][i_ref1][5], h->mb.pic.i_stride[2],
586 mvx1, mvy1, 2*width, 2*height );
587 h->mc.avg[i_mode+3]( &h->mb.pic.p_fdec[2][2*y*FDEC_STRIDE+2*x], FDEC_STRIDE, tmp0, 16, tmp1, 16, weight );
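/* MC one 8x8 block: in P slices dispatch on its sub-partition; in B slices on
 * which lists have a valid (non-negative) reference. */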
590 void x264_mb_mc_8x8( x264_t *h, int i8 )
595 if( h->sh.i_type == SLICE_TYPE_P )
597 switch( h->mb.i_sub_partition[i8] )
600 x264_mb_mc_0xywh( h, x, y, 2, 2 );
603 x264_mb_mc_0xywh( h, x, y+0, 2, 1 );
604 x264_mb_mc_0xywh( h, x, y+1, 2, 1 );
607 x264_mb_mc_0xywh( h, x+0, y, 1, 2 );
608 x264_mb_mc_0xywh( h, x+1, y, 1, 2 );
611 x264_mb_mc_0xywh( h, x+0, y+0, 1, 1 );
612 x264_mb_mc_0xywh( h, x+1, y+0, 1, 1 );
613 x264_mb_mc_0xywh( h, x+0, y+1, 1, 1 );
614 x264_mb_mc_0xywh( h, x+1, y+1, 1, 1 );
620 int scan8 = x264_scan8[0] + x + 8*y;
622 if( h->mb.cache.ref[0][scan8] >= 0 )
623 if( h->mb.cache.ref[1][scan8] >= 0 )
624 x264_mb_mc_01xywh( h, x, y, 2, 2 );
626 x264_mb_mc_0xywh( h, x, y, 2, 2 );
628 x264_mb_mc_1xywh( h, x, y, 2, 2 );
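/* MC the whole macroblock according to i_partition; a negative ref index means
 * the corresponding list is unused for that partition. */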
632 void x264_mb_mc( x264_t *h )
634 if( h->mb.i_partition == D_8x8 )
636 for( int i = 0; i < 4; i++ )
637 x264_mb_mc_8x8( h, i );
641 int ref0a = h->mb.cache.ref[0][x264_scan8[ 0]];
642 int ref0b = h->mb.cache.ref[0][x264_scan8[12]];
643 int ref1a = h->mb.cache.ref[1][x264_scan8[ 0]];
644 int ref1b = h->mb.cache.ref[1][x264_scan8[12]];
646 if( h->mb.i_partition == D_16x16 )
649 if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 4, 4 );
650 else x264_mb_mc_0xywh ( h, 0, 0, 4, 4 );
651 else x264_mb_mc_1xywh ( h, 0, 0, 4, 4 );
653 else if( h->mb.i_partition == D_16x8 )
656 if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 4, 2 );
657 else x264_mb_mc_0xywh ( h, 0, 0, 4, 2 );
658 else x264_mb_mc_1xywh ( h, 0, 0, 4, 2 );
661 if( ref1b >= 0 ) x264_mb_mc_01xywh( h, 0, 2, 4, 2 );
662 else x264_mb_mc_0xywh ( h, 0, 2, 4, 2 );
663 else x264_mb_mc_1xywh ( h, 0, 2, 4, 2 );
665 else if( h->mb.i_partition == D_8x16 )
668 if( ref1a >= 0 ) x264_mb_mc_01xywh( h, 0, 0, 2, 4 );
669 else x264_mb_mc_0xywh ( h, 0, 0, 2, 4 );
670 else x264_mb_mc_1xywh ( h, 0, 0, 2, 4 );
673 if( ref1b >= 0 ) x264_mb_mc_01xywh( h, 2, 0, 2, 4 );
674 else x264_mb_mc_0xywh ( h, 2, 0, 2, 4 );
675 else x264_mb_mc_1xywh ( h, 2, 0, 2, 4 );
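/* One-time allocation of the per-frame MB arrays: qp/cbp/skip/transform-size
 * maps, intra pred mode and non-zero-count storage, per-reference MV fields
 * (mvr), weighted-prediction buffers and the intra border backup rows.
 * Returns -1 on allocation failure via CHECKED_MALLOC's fail path. */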
680 int x264_macroblock_cache_init( x264_t *h )
682 int i_mb_count = h->mb.i_mb_count;
684 h->mb.i_mb_stride = h->sps->i_mb_width;
685 h->mb.i_b8_stride = h->sps->i_mb_width * 2;
686 h->mb.i_b4_stride = h->sps->i_mb_width * 4;
688 h->mb.b_interlaced = h->param.b_interlaced;
690 CHECKED_MALLOC( h->mb.qp, i_mb_count * sizeof(int8_t) );
691 CHECKED_MALLOC( h->mb.cbp, i_mb_count * sizeof(int16_t) );
692 CHECKED_MALLOC( h->mb.skipbp, i_mb_count * sizeof(int8_t) );
693 CHECKED_MALLOC( h->mb.mb_transform_size, i_mb_count * sizeof(int8_t) );
695 /* per-MB intra4x4 pred mode storage: entries 0-3 serve as the top neighbours (4 values) of the MB below, entries 4-6 as the left neighbours (3 values) of the next MB */
696 CHECKED_MALLOC( h->mb.intra4x4_pred_mode, i_mb_count * 8 * sizeof(int8_t) );
699 CHECKED_MALLOC( h->mb.non_zero_count, i_mb_count * 24 * sizeof(uint8_t) );
701 if( h->param.b_cabac )
703 CHECKED_MALLOC( h->mb.chroma_pred_mode, i_mb_count * sizeof(int8_t) );
704 CHECKED_MALLOC( h->mb.mvd[0], i_mb_count * sizeof( **h->mb.mvd ) );
705 CHECKED_MALLOC( h->mb.mvd[1], i_mb_count * sizeof( **h->mb.mvd ) );
708 for( int i = 0; i < 2; i++ )
710 int i_refs = X264_MIN(16, (i ? 1 + !!h->param.i_bframe_pyramid : h->param.i_frame_reference) ) << h->param.b_interlaced;
711 if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
712 i_refs = X264_MIN(16, i_refs + 2); //smart weights add two duplicate frames
713 else if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_BLIND )
714 i_refs = X264_MIN(16, i_refs + 1); //blind weights add one duplicate frame
716 for( int j = 0; j < i_refs; j++ )
717 CHECKED_MALLOC( h->mb.mvr[i][j], 2 * i_mb_count * sizeof(int16_t) );
720 if( h->param.analyse.i_weighted_pred )
722 int i_padv = PADV << h->param.b_interlaced;
723 #define ALIGN(x,a) (((x)+((a)-1))&~((a)-1))
724 int align = h->param.cpu&X264_CPU_CACHELINE_64 ? 64 : h->param.cpu&X264_CPU_CACHELINE_32 ? 32 : 16;
725 int i_stride, luma_plane_size = 0;
728 if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_FAKE )
730 // only need buffer for lookahead
731 if( !h->param.i_sync_lookahead || h == h->thread[h->param.i_threads] )
733 // Fake analysis only works on lowres
734 i_stride = ALIGN( h->sps->i_mb_width*8 + 2*PADH, align );
735 luma_plane_size = i_stride * (h->sps->i_mb_height*8+2*i_padv);
736 // Only need 1 buffer for analysis
744 i_stride = ALIGN( h->sps->i_mb_width*16 + 2*PADH, align );
745 luma_plane_size = i_stride * (h->sps->i_mb_height*16+2*i_padv);
747 if( h->param.analyse.i_weighted_pred == X264_WEIGHTP_SMART )
748 //SMART can weight one ref and one offset -1
751 //blind only has one weighted copy (offset -1)
755 for( int i = 0; i < numweightbuf; i++ )
756 CHECKED_MALLOC( h->mb.p_weight_buf[i], luma_plane_size );
760 for( int i = 0; i <= h->param.b_interlaced; i++ )
761 for( int j = 0; j < 3; j++ )
763 /* shouldn't really be initialized, just silences a valgrind false-positive in predict_8x8_filter_mmx */
764 CHECKED_MALLOCZERO( h->mb.intra_border_backup[i][j], (h->sps->i_mb_width*16+32)>>!!j );
765 h->mb.intra_border_backup[i][j] += 8;
771 void x264_macroblock_cache_end( x264_t *h )
773 for( int i = 0; i <= h->param.b_interlaced; i++ )
774 for( int j = 0; j < 3; j++ )
775 x264_free( h->mb.intra_border_backup[i][j] - 8 );
776 for( int i = 0; i < 2; i++ )
777 for( int j = 0; j < 32; j++ )
778 x264_free( h->mb.mvr[i][j] );
779 for( int i = 0; i < 16; i++ )
780 x264_free( h->mb.p_weight_buf[i] );
782 if( h->param.b_cabac )
784 x264_free( h->mb.chroma_pred_mode );
785 x264_free( h->mb.mvd[0] );
786 x264_free( h->mb.mvd[1] );
788 x264_free( h->mb.intra4x4_pred_mode );
789 x264_free( h->mb.non_zero_count );
790 x264_free( h->mb.mb_transform_size );
791 x264_free( h->mb.skipbp );
792 x264_free( h->mb.cbp );
793 x264_free( h->mb.qp );
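/* Per-slice initialization: point h->mb at the current fdec's mv/ref/type
 * arrays, record reference POCs, build the colocated-to-list0 map used by
 * temporal direct, reset the ref cache to "unavailable", set up the inverse
 * delta-POC table and the constant part of the neighbour masks. */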
795 void x264_macroblock_slice_init( x264_t *h )
797 h->mb.mv[0] = h->fdec->mv[0];
798 h->mb.mv[1] = h->fdec->mv[1];
799 h->mb.ref[0] = h->fdec->ref[0];
800 h->mb.ref[1] = h->fdec->ref[1];
801 h->mb.type = h->fdec->mb_type;
802 h->mb.partition = h->fdec->mb_partition;
804 h->fdec->i_ref[0] = h->i_ref0;
805 h->fdec->i_ref[1] = h->i_ref1;
806 for( int i = 0; i < h->i_ref0; i++ )
807 h->fdec->ref_poc[0][i] = h->fref0[i]->i_poc;
808 if( h->sh.i_type == SLICE_TYPE_B )
810 for( int i = 0; i < h->i_ref1; i++ )
811 h->fdec->ref_poc[1][i] = h->fref1[i]->i_poc;
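/* map_col_to_list0() is a macro over h->mb.map_col_to_list0[] (offset by 2 so
 * the -1/-2 "unavailable" entries below are addressable). */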
813 map_col_to_list0(-1) = -1;
814 map_col_to_list0(-2) = -2;
815 for( int i = 0; i < h->fref1[0]->i_ref[0]; i++ )
817 int poc = h->fref1[0]->ref_poc[0][i];
818 map_col_to_list0(i) = -2;
819 for( int j = 0; j < h->i_ref0; j++ )
820 if( h->fref0[j]->i_poc == poc )
822 map_col_to_list0(i) = j;
827 if( h->sh.i_type == SLICE_TYPE_P )
828 memset( h->mb.cache.skip, 0, sizeof( h->mb.cache.skip ) );
830 /* init the ref cache as not available (covers the top-right neighbour of blocks 7 and 15) */
831 memset( h->mb.cache.ref, -2, sizeof( h->mb.cache.ref ) );
833 setup_inverse_delta_pocs( h );
835 h->mb.i_neighbour4[6] =
836 h->mb.i_neighbour4[9] =
837 h->mb.i_neighbour4[12] =
838 h->mb.i_neighbour4[14] = MB_LEFT|MB_TOP|MB_TOPLEFT|MB_TOPRIGHT;
839 h->mb.i_neighbour4[3] =
840 h->mb.i_neighbour4[7] =
841 h->mb.i_neighbour4[11] =
842 h->mb.i_neighbour4[13] =
843 h->mb.i_neighbour4[15] =
844 h->mb.i_neighbour8[3] = MB_LEFT|MB_TOP|MB_TOPLEFT;
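/* Per-thread setup: analysis settings derived from the slice type, plus
 * pointers into the packed fenc/fdec scratch buffers (luma block first, U and V
 * side by side below it; fdec leaves rows above for the intra prediction
 * border). */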
847 void x264_macroblock_thread_init( x264_t *h )
849 h->mb.i_me_method = h->param.analyse.i_me_method;
850 h->mb.i_subpel_refine = h->param.analyse.i_subpel_refine;
851 if( h->sh.i_type == SLICE_TYPE_B && (h->mb.i_subpel_refine == 6 || h->mb.i_subpel_refine == 8) )
852 h->mb.i_subpel_refine--;
853 h->mb.b_chroma_me = h->param.analyse.b_chroma_me && h->sh.i_type == SLICE_TYPE_P
854 && h->mb.i_subpel_refine >= 5;
855 h->mb.b_dct_decimate = h->sh.i_type == SLICE_TYPE_B ||
856 (h->param.analyse.b_dct_decimate && h->sh.i_type != SLICE_TYPE_I);
869 h->mb.pic.p_fenc[0] = h->mb.pic.fenc_buf;
870 h->mb.pic.p_fenc[1] = h->mb.pic.fenc_buf + 16*FENC_STRIDE;
871 h->mb.pic.p_fenc[2] = h->mb.pic.fenc_buf + 16*FENC_STRIDE + 8;
872 h->mb.pic.p_fdec[0] = h->mb.pic.fdec_buf + 2*FDEC_STRIDE;
873 h->mb.pic.p_fdec[1] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE;
874 h->mb.pic.p_fdec[2] = h->mb.pic.fdec_buf + 19*FDEC_STRIDE + 16;
877 void x264_prefetch_fenc( x264_t *h, x264_frame_t *fenc, int i_mb_x, int i_mb_y )
879 int stride_y = fenc->i_stride[0];
880 int stride_uv = fenc->i_stride[1];
881 int off_y = 16 * (i_mb_x + i_mb_y * stride_y);
882 int off_uv = 8 * (i_mb_x + i_mb_y * stride_uv);
883 h->mc.prefetch_fenc( fenc->plane[0]+off_y, stride_y,
884 fenc->plane[1+(i_mb_x&1)]+off_uv, stride_uv, i_mb_x );
887 static NOINLINE void copy_column8( uint8_t *dst, uint8_t *src )
889 // input pointers are offset by 4 rows because that's faster (smaller instruction size on x86)
890 for( int i = -4; i < 4; i++ )
891 dst[i*FDEC_STRIDE] = src[i*FDEC_STRIDE];
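/* Load the per-MB picture pointers for plane i (0 = luma, 1/2 = chroma):
 * strides, fenc/fdec pointers, reference-plane (and half-pel / weighted)
 * pointers; copy the source block into the fenc cache and load the top intra
 * prediction border. */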
894 static void ALWAYS_INLINE x264_macroblock_load_pic_pointers( x264_t *h, int i_mb_x, int i_mb_y, int i)
896 const int w = (i == 0 ? 16 : 8);
897 const int i_stride = h->fdec->i_stride[!!i];
898 const int i_stride2 = i_stride << h->mb.b_interlaced;
899 const int i_pix_offset = h->mb.b_interlaced
900 ? w * (i_mb_x + (i_mb_y&~1) * i_stride) + (i_mb_y&1) * i_stride
901 : w * (i_mb_x + i_mb_y * i_stride);
902 const uint8_t *plane_fdec = &h->fdec->plane[i][i_pix_offset];
903 const uint8_t *intra_fdec = h->param.b_sliced_threads ? plane_fdec-i_stride2 :
904 &h->mb.intra_border_backup[i_mb_y & h->sh.b_mbaff][i][i_mb_x*16>>!!i];
905 int ref_pix_offset[2] = { i_pix_offset, i_pix_offset };
906 x264_frame_t **fref[2] = { h->fref0, h->fref1 };
907 if( h->mb.b_interlaced )
908 ref_pix_offset[1] += (1-2*(i_mb_y&1)) * i_stride;
909 h->mb.pic.i_stride[i] = i_stride2;
910 h->mb.pic.p_fenc_plane[i] = &h->fenc->plane[i][i_pix_offset];
911 h->mc.copy[i?PIXEL_8x8:PIXEL_16x16]( h->mb.pic.p_fenc[i], FENC_STRIDE,
912 h->mb.pic.p_fenc_plane[i], i_stride2, w );
914 memcpy( &h->mb.pic.p_fdec[i][-1-FDEC_STRIDE], intra_fdec-1, w*3/2+1 );
916 memset( &h->mb.pic.p_fdec[i][-1-FDEC_STRIDE], 0, w*3/2+1 );
917 if( h->mb.b_interlaced || h->mb.b_reencode_mb )
918 for( int j = 0; j < w; j++ )
919 h->mb.pic.p_fdec[i][-1+j*FDEC_STRIDE] = plane_fdec[-1+j*i_stride2];
920 for( int j = 0; j < h->mb.pic.i_fref[0]; j++ )
922 h->mb.pic.p_fref[0][j][i==0 ? 0:i+3] = &fref[0][j >> h->mb.b_interlaced]->plane[i][ref_pix_offset[j&1]];
925 for( int k = 1; k < 4; k++ )
926 h->mb.pic.p_fref[0][j][k] = &fref[0][j >> h->mb.b_interlaced]->filtered[k][ref_pix_offset[j&1]];
927 if( h->sh.weight[j][0].weightfn )
928 h->mb.pic.p_fref_w[j] = &h->fenc->weighted[j >> h->mb.b_interlaced][ref_pix_offset[j&1]];
930 h->mb.pic.p_fref_w[j] = h->mb.pic.p_fref[0][j][0];
933 if( h->sh.i_type == SLICE_TYPE_B )
934 for( int j = 0; j < h->mb.pic.i_fref[1]; j++ )
936 h->mb.pic.p_fref[1][j][i==0 ? 0:i+3] = &fref[1][j >> h->mb.b_interlaced]->plane[i][ref_pix_offset[j&1]];
938 for( int k = 1; k < 4; k++ )
939 h->mb.pic.p_fref[1][j][k] = &fref[1][j >> h->mb.b_interlaced]->filtered[k][ref_pix_offset[j&1]];
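/* Load everything needed to encode MB (i_mb_x, i_mb_y) into h->mb and
 * h->mb.cache: neighbour availability and types, intra pred modes, non-zero
 * counts, ref indices, MVs and (for CABAC) MVDs and skip flags. Unavailable
 * refs are marked -2, unavailable nzc entries 0x80, unavailable intra modes -1. */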
943 void x264_macroblock_cache_load( x264_t *h, int i_mb_x, int i_mb_y )
945 int i_mb_xy = i_mb_y * h->mb.i_mb_stride + i_mb_x;
946 int i_mb_4x4 = 4*(i_mb_y * h->mb.i_b4_stride + i_mb_x);
947 int i_mb_8x8 = 2*(i_mb_y * h->mb.i_b8_stride + i_mb_x);
948 int i_top_y = i_mb_y - (1 << h->mb.b_interlaced);
949 int i_top_xy = i_top_y * h->mb.i_mb_stride + i_mb_x;
950 int i_top_4x4 = (4*i_top_y+3) * h->mb.i_b4_stride + 4*i_mb_x;
951 int i_top_8x8 = (2*i_top_y+1) * h->mb.i_b8_stride + 2*i_mb_x;
953 int i_top_type = -1; /* silence a gcc "may be used uninitialized" warning */
957 h->mb.i_mb_x = i_mb_x;
958 h->mb.i_mb_y = i_mb_y;
959 h->mb.i_mb_xy = i_mb_xy;
960 h->mb.i_b8_xy = i_mb_8x8;
961 h->mb.i_b4_xy = i_mb_4x4;
962 h->mb.i_mb_top_xy = i_top_xy;
963 h->mb.i_neighbour = 0;
964 h->mb.i_neighbour_intra = 0;
967 if( i_top_xy >= h->sh.i_first_mb )
969 h->mb.i_mb_type_top =
970 i_top_type = h->mb.type[i_top_xy];
971 h->mb.cache.i_cbp_top = h->mb.cbp[i_top_xy];
973 h->mb.i_neighbour |= MB_TOP;
975 if( !h->param.b_constrained_intra || IS_INTRA( i_top_type ) )
976 h->mb.i_neighbour_intra |= MB_TOP;
979 CP32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8], &h->mb.intra4x4_pred_mode[i_top_xy][0] );
981 /* load non_zero_count */
982 CP32( &h->mb.cache.non_zero_count[x264_scan8[0] - 8], &h->mb.non_zero_count[i_top_xy][12] );
983 /* shift because x264_scan8[16] is misaligned */
984 M32( &h->mb.cache.non_zero_count[x264_scan8[16+0] - 9] ) = M16( &h->mb.non_zero_count[i_top_xy][18] ) << 8;
985 M32( &h->mb.cache.non_zero_count[x264_scan8[16+4] - 9] ) = M16( &h->mb.non_zero_count[i_top_xy][22] ) << 8;
989 h->mb.i_mb_type_top = -1;
990 h->mb.cache.i_cbp_top = -1;
993 M32( &h->mb.cache.intra4x4_pred_mode[x264_scan8[0] - 8] ) = 0xFFFFFFFFU;
995 /* load non_zero_count */
996 M32( &h->mb.cache.non_zero_count[x264_scan8[ 0] - 8] ) = 0x80808080U;
997 M32( &h->mb.cache.non_zero_count[x264_scan8[16+0] - 9] ) = 0x80808080U;
998 M32( &h->mb.cache.non_zero_count[x264_scan8[16+4] - 9] ) = 0x80808080U;
1001 if( i_mb_x > 0 && i_mb_xy > h->sh.i_first_mb )
1003 i_left_xy = i_mb_xy - 1;
1004 h->mb.i_mb_type_left =
1005 i_left_type = h->mb.type[i_left_xy];
1006 h->mb.cache.i_cbp_left = h->mb.cbp[h->mb.i_mb_xy - 1];
1008 h->mb.i_neighbour |= MB_LEFT;
1010 if( !h->param.b_constrained_intra || IS_INTRA( i_left_type ) )
1011 h->mb.i_neighbour_intra |= MB_LEFT;
1014 h->mb.cache.intra4x4_pred_mode[x264_scan8[0 ] - 1] = h->mb.intra4x4_pred_mode[i_left_xy][4];
1015 h->mb.cache.intra4x4_pred_mode[x264_scan8[2 ] - 1] = h->mb.intra4x4_pred_mode[i_left_xy][5];
1016 h->mb.cache.intra4x4_pred_mode[x264_scan8[8 ] - 1] = h->mb.intra4x4_pred_mode[i_left_xy][6];
1017 h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = h->mb.intra4x4_pred_mode[i_left_xy][3];
1019 /* load non_zero_count */
1020 h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] = h->mb.non_zero_count[i_left_xy][3];
1021 h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] = h->mb.non_zero_count[i_left_xy][7];
1022 h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] = h->mb.non_zero_count[i_left_xy][11];
1023 h->mb.cache.non_zero_count[x264_scan8[10] - 1] = h->mb.non_zero_count[i_left_xy][15];
1025 h->mb.cache.non_zero_count[x264_scan8[16+0] - 1] = h->mb.non_zero_count[i_left_xy][16+1];
1026 h->mb.cache.non_zero_count[x264_scan8[16+2] - 1] = h->mb.non_zero_count[i_left_xy][16+3];
1028 h->mb.cache.non_zero_count[x264_scan8[16+4+0] - 1] = h->mb.non_zero_count[i_left_xy][16+4+1];
1029 h->mb.cache.non_zero_count[x264_scan8[16+4+2] - 1] = h->mb.non_zero_count[i_left_xy][16+4+3];
1033 h->mb.i_mb_type_left = -1;
1034 h->mb.cache.i_cbp_left = -1;
1036 h->mb.cache.intra4x4_pred_mode[x264_scan8[0 ] - 1] =
1037 h->mb.cache.intra4x4_pred_mode[x264_scan8[2 ] - 1] =
1038 h->mb.cache.intra4x4_pred_mode[x264_scan8[8 ] - 1] =
1039 h->mb.cache.intra4x4_pred_mode[x264_scan8[10] - 1] = -1;
1041 /* load non_zero_count */
1042 h->mb.cache.non_zero_count[x264_scan8[0 ] - 1] =
1043 h->mb.cache.non_zero_count[x264_scan8[2 ] - 1] =
1044 h->mb.cache.non_zero_count[x264_scan8[8 ] - 1] =
1045 h->mb.cache.non_zero_count[x264_scan8[10] - 1] =
1046 h->mb.cache.non_zero_count[x264_scan8[16+0] - 1] =
1047 h->mb.cache.non_zero_count[x264_scan8[16+2] - 1] =
1048 h->mb.cache.non_zero_count[x264_scan8[16+4+0] - 1] =
1049 h->mb.cache.non_zero_count[x264_scan8[16+4+2] - 1] = 0x80;
1052 if( i_mb_x < h->sps->i_mb_width - 1 && i_top_xy + 1 >= h->sh.i_first_mb )
1054 h->mb.i_neighbour |= MB_TOPRIGHT;
1055 h->mb.i_mb_type_topright = h->mb.type[ i_top_xy + 1 ];
1056 if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topright ) )
1057 h->mb.i_neighbour_intra |= MB_TOPRIGHT;
1060 h->mb.i_mb_type_topright = -1;
1061 if( i_mb_x > 0 && i_top_xy - 1 >= h->sh.i_first_mb )
1063 h->mb.i_neighbour |= MB_TOPLEFT;
1064 h->mb.i_mb_type_topleft = h->mb.type[ i_top_xy - 1 ];
1065 if( !h->param.b_constrained_intra || IS_INTRA( h->mb.i_mb_type_topleft ) )
1066 h->mb.i_neighbour_intra |= MB_TOPLEFT;
1069 h->mb.i_mb_type_topleft = -1;
1071 if( h->pps->b_transform_8x8_mode )
1073 h->mb.cache.i_neighbour_transform_size =
1074 ( i_left_type >= 0 && h->mb.mb_transform_size[i_left_xy] )
1075 + ( i_top_type >= 0 && h->mb.mb_transform_size[i_top_xy] );
1080 h->mb.pic.i_fref[0] = h->i_ref0 << h->mb.b_interlaced;
1081 h->mb.pic.i_fref[1] = h->i_ref1 << h->mb.b_interlaced;
1082 h->mb.cache.i_neighbour_interlaced =
1083 !!(h->mb.i_neighbour & MB_LEFT)
1084 + !!(h->mb.i_neighbour & MB_TOP);
1087 if( !h->mb.b_interlaced && !h->mb.b_reencode_mb )
1089 copy_column8( h->mb.pic.p_fdec[0]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+ 4*FDEC_STRIDE );
1090 copy_column8( h->mb.pic.p_fdec[0]-1+12*FDEC_STRIDE, h->mb.pic.p_fdec[0]+15+12*FDEC_STRIDE );
1091 copy_column8( h->mb.pic.p_fdec[1]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[1]+ 7+ 4*FDEC_STRIDE );
1092 copy_column8( h->mb.pic.p_fdec[2]-1+ 4*FDEC_STRIDE, h->mb.pic.p_fdec[2]+ 7+ 4*FDEC_STRIDE );
1095 /* load picture pointers */
1096 x264_macroblock_load_pic_pointers( h, i_mb_x, i_mb_y, 0 );
1097 x264_macroblock_load_pic_pointers( h, i_mb_x, i_mb_y, 1 );
1098 x264_macroblock_load_pic_pointers( h, i_mb_x, i_mb_y, 2 );
1100 if( h->fdec->integral )
1102 assert( !h->mb.b_interlaced );
1103 for( int i = 0; i < h->mb.pic.i_fref[0]; i++ )
1104 h->mb.pic.p_integral[0][i] = &h->fref0[i]->integral[ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )];
1105 for( int i = 0; i < h->mb.pic.i_fref[1]; i++ )
1106 h->mb.pic.p_integral[1][i] = &h->fref1[i]->integral[ 16 * ( i_mb_x + i_mb_y * h->fdec->i_stride[0] )];
1109 x264_prefetch_fenc( h, h->fenc, i_mb_x, i_mb_y );
1111 /* load ref/mv/mvd */
1112 if( h->sh.i_type != SLICE_TYPE_I )
1114 const int s8x8 = h->mb.i_b8_stride;
1115 const int s4x4 = h->mb.i_b4_stride;
1117 for( int i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
1120 h->mb.cache.ref[i_list][x264_scan8[5 ]+1] =
1121 h->mb.cache.ref[i_list][x264_scan8[7 ]+1] =
1122 h->mb.cache.ref[i_list][x264_scan8[13]+1] = -2;
1125 if( h->mb.i_neighbour & MB_TOPLEFT )
1127 const int i8 = x264_scan8[0] - 1 - 1*8;
1128 const int ir = i_top_8x8 - 1;
1129 const int iv = i_top_4x4 - 1;
1130 h->mb.cache.ref[i_list][i8] = h->mb.ref[i_list][ir];
1131 CP32( h->mb.cache.mv[i_list][i8], h->mb.mv[i_list][iv] );
1135 const int i8 = x264_scan8[0] - 1 - 1*8;
1136 h->mb.cache.ref[i_list][i8] = -2;
1137 M32( h->mb.cache.mv[i_list][i8] ) = 0;
1140 if( h->mb.i_neighbour & MB_TOP )
1142 const int i8 = x264_scan8[0] - 8;
1143 const int ir = i_top_8x8;
1144 const int iv = i_top_4x4;
1145 h->mb.cache.ref[i_list][i8+0] =
1146 h->mb.cache.ref[i_list][i8+1] = h->mb.ref[i_list][ir + 0];
1147 h->mb.cache.ref[i_list][i8+2] =
1148 h->mb.cache.ref[i_list][i8+3] = h->mb.ref[i_list][ir + 1];
1149 CP64( h->mb.cache.mv[i_list][i8+0], h->mb.mv[i_list][iv+0] );
1150 CP64( h->mb.cache.mv[i_list][i8+2], h->mb.mv[i_list][iv+2] );
1154 const int i8 = x264_scan8[0] - 8;
1155 M64( h->mb.cache.mv[i_list][i8+0] ) = 0;
1156 M64( h->mb.cache.mv[i_list][i8+2] ) = 0;
1157 M32( &h->mb.cache.ref[i_list][i8] ) = (uint8_t)(-2) * 0x01010101U;
1160 if( h->mb.i_neighbour & MB_TOPRIGHT )
1162 const int i8 = x264_scan8[0] + 4 - 1*8;
1163 const int ir = i_top_8x8 + 2;
1164 const int iv = i_top_4x4 + 4;
1165 h->mb.cache.ref[i_list][i8] = h->mb.ref[i_list][ir];
1166 CP32( h->mb.cache.mv[i_list][i8], h->mb.mv[i_list][iv] );
1170 const int i8 = x264_scan8[0] + 4 - 1*8;
1171 h->mb.cache.ref[i_list][i8] = -2;
1174 if( h->mb.i_neighbour & MB_LEFT )
1176 const int i8 = x264_scan8[0] - 1;
1177 const int ir = i_mb_8x8 - 1;
1178 const int iv = i_mb_4x4 - 1;
1179 h->mb.cache.ref[i_list][i8+0*8] =
1180 h->mb.cache.ref[i_list][i8+1*8] = h->mb.ref[i_list][ir + 0*s8x8];
1181 h->mb.cache.ref[i_list][i8+2*8] =
1182 h->mb.cache.ref[i_list][i8+3*8] = h->mb.ref[i_list][ir + 1*s8x8];
1184 CP32( h->mb.cache.mv[i_list][i8+0*8], h->mb.mv[i_list][iv + 0*s4x4] );
1185 CP32( h->mb.cache.mv[i_list][i8+1*8], h->mb.mv[i_list][iv + 1*s4x4] );
1186 CP32( h->mb.cache.mv[i_list][i8+2*8], h->mb.mv[i_list][iv + 2*s4x4] );
1187 CP32( h->mb.cache.mv[i_list][i8+3*8], h->mb.mv[i_list][iv + 3*s4x4] );
1191 const int i8 = x264_scan8[0] - 1;
1192 for( int i = 0; i < 4; i++ )
1194 h->mb.cache.ref[i_list][i8+i*8] = -2;
1195 M32( h->mb.cache.mv[i_list][i8+i*8] ) = 0;
1199 if( h->param.b_cabac )
1201 if( i_top_type >= 0 )
1202 CP64( h->mb.cache.mvd[i_list][x264_scan8[0] - 8], h->mb.mvd[i_list][i_top_xy][0] );
1204 M64( h->mb.cache.mvd[i_list][x264_scan8[0] - 8] ) = 0;
1206 if( i_left_type >= 0 )
1208 CP16( h->mb.cache.mvd[i_list][x264_scan8[0 ] - 1], h->mb.mvd[i_list][i_left_xy][4] );
1209 CP16( h->mb.cache.mvd[i_list][x264_scan8[2 ] - 1], h->mb.mvd[i_list][i_left_xy][5] );
1210 CP16( h->mb.cache.mvd[i_list][x264_scan8[8 ] - 1], h->mb.mvd[i_list][i_left_xy][6] );
1211 CP16( h->mb.cache.mvd[i_list][x264_scan8[10] - 1], h->mb.mvd[i_list][i_left_xy][3] );
1214 for( int i = 0; i < 4; i++ )
1215 M16( h->mb.cache.mvd[i_list][x264_scan8[0]-1+i*8] ) = 0;
1220 if( h->sh.i_type == SLICE_TYPE_B )
1222 h->mb.bipred_weight = h->mb.bipred_weight_buf[h->mb.b_interlaced&(i_mb_y&1)];
1223 h->mb.dist_scale_factor = h->mb.dist_scale_factor_buf[h->mb.b_interlaced&(i_mb_y&1)];
1224 if( h->param.b_cabac )
1227 x264_macroblock_cache_skip( h, 0, 0, 4, 4, 0 );
1228 skipbp = i_left_type >= 0 ? h->mb.skipbp[i_left_xy] : 0;
1229 h->mb.cache.skip[x264_scan8[0] - 1] = skipbp & 0x2;
1230 h->mb.cache.skip[x264_scan8[8] - 1] = skipbp & 0x8;
1231 skipbp = i_top_type >= 0 ? h->mb.skipbp[i_top_xy] : 0;
1232 h->mb.cache.skip[x264_scan8[0] - 8] = skipbp & 0x4;
1233 h->mb.cache.skip[x264_scan8[4] - 8] = skipbp & 0x8;
1237 if( h->sh.i_type == SLICE_TYPE_P )
1238 x264_mb_predict_mv_pskip( h, h->mb.cache.pskip_mv );
1241 h->mb.i_neighbour4[0] =
1242 h->mb.i_neighbour8[0] = (h->mb.i_neighbour_intra & (MB_TOP|MB_LEFT|MB_TOPLEFT))
1243 | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOPRIGHT : 0);
1244 h->mb.i_neighbour4[4] =
1245 h->mb.i_neighbour4[1] = MB_LEFT | ((h->mb.i_neighbour_intra & MB_TOP) ? (MB_TOP|MB_TOPLEFT|MB_TOPRIGHT) : 0);
1246 h->mb.i_neighbour4[2] =
1247 h->mb.i_neighbour4[8] =
1248 h->mb.i_neighbour4[10] =
1249 h->mb.i_neighbour8[2] = MB_TOP|MB_TOPRIGHT | ((h->mb.i_neighbour_intra & MB_LEFT) ? (MB_LEFT|MB_TOPLEFT) : 0);
1250 h->mb.i_neighbour4[5] =
1251 h->mb.i_neighbour8[1] = MB_LEFT | (h->mb.i_neighbour_intra & MB_TOPRIGHT)
1252 | ((h->mb.i_neighbour_intra & MB_TOP) ? MB_TOP|MB_TOPLEFT : 0);
1255 static void ALWAYS_INLINE x264_macroblock_store_pic( x264_t *h, int i )
1258 int i_stride = h->fdec->i_stride[!!i];
1259 int i_stride2 = i_stride << h->mb.b_interlaced;
1260 int i_pix_offset = h->mb.b_interlaced
1261 ? w * (h->mb.i_mb_x + (h->mb.i_mb_y&~1) * i_stride) + (h->mb.i_mb_y&1) * i_stride
1262 : w * (h->mb.i_mb_x + h->mb.i_mb_y * i_stride);
1263 h->mc.copy[i?PIXEL_8x8:PIXEL_16x16]( &h->fdec->plane[i][i_pix_offset], i_stride2,
1264 h->mb.pic.p_fdec[i], FDEC_STRIDE, w );
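/* Write back the results of encoding the current MB: reconstructed pixels into
 * fdec, then type, partition, qp, cbp, intra modes, non-zero counts, ref/mv
 * (and mvd/skip state for CABAC) into the frame-wide arrays read by later MBs. */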
1267 void x264_macroblock_cache_save( x264_t *h )
1269 const int i_mb_xy = h->mb.i_mb_xy;
1270 const int i_mb_type = x264_mb_type_fix[h->mb.i_type];
1271 const int s8x8 = h->mb.i_b8_stride;
1272 const int s4x4 = h->mb.i_b4_stride;
1273 const int i_mb_4x4 = h->mb.i_b4_xy;
1274 const int i_mb_8x8 = h->mb.i_b8_xy;
1276 /* GCC pessimizes direct stores to heap-allocated 8-bit arrays due to aliasing;
1277 * by only dereferencing them once, we avoid this issue. */
1278 int8_t *intra4x4_pred_mode = h->mb.intra4x4_pred_mode[i_mb_xy];
1279 uint8_t *non_zero_count = h->mb.non_zero_count[i_mb_xy];
1281 x264_macroblock_store_pic( h, 0 );
1282 x264_macroblock_store_pic( h, 1 );
1283 x264_macroblock_store_pic( h, 2 );
1285 x264_prefetch_fenc( h, h->fdec, h->mb.i_mb_x, h->mb.i_mb_y );
1287 h->mb.type[i_mb_xy] = i_mb_type;
1288 h->mb.partition[i_mb_xy] = IS_INTRA( i_mb_type ) ? D_16x16 : h->mb.i_partition;
1289 h->mb.i_mb_prev_xy = i_mb_xy;
1292 if( i_mb_type == I_4x4 )
1294 CP32( &intra4x4_pred_mode[0], &h->mb.cache.intra4x4_pred_mode[x264_scan8[10]] );
1295 M32( &intra4x4_pred_mode[4] ) = pack8to32(h->mb.cache.intra4x4_pred_mode[x264_scan8[5] ],
1296 h->mb.cache.intra4x4_pred_mode[x264_scan8[7] ],
1297 h->mb.cache.intra4x4_pred_mode[x264_scan8[13] ], 0);
1299 else if( !h->param.b_constrained_intra || IS_INTRA(i_mb_type) )
1300 M64( intra4x4_pred_mode ) = I_PRED_4x4_DC * 0x0101010101010101ULL;
1302 M64( intra4x4_pred_mode ) = (uint8_t)(-1) * 0x0101010101010101ULL;
1305 if( i_mb_type == I_PCM )
1307 h->mb.qp[i_mb_xy] = 0;
1308 h->mb.i_last_dqp = 0;
1309 h->mb.i_cbp_chroma = 2;
1310 h->mb.i_cbp_luma = 0xf;
1311 h->mb.cbp[i_mb_xy] = 0x72f; /* all set */
1312 h->mb.b_transform_8x8 = 0;
1313 memset( non_zero_count, 16, sizeof( *h->mb.non_zero_count ) );
1317 /* save non-zero counts */
1318 CP32( &non_zero_count[0*4], &h->mb.cache.non_zero_count[x264_scan8[0]+0*8] );
1319 CP32( &non_zero_count[1*4], &h->mb.cache.non_zero_count[x264_scan8[0]+1*8] );
1320 CP32( &non_zero_count[2*4], &h->mb.cache.non_zero_count[x264_scan8[0]+2*8] );
1321 CP32( &non_zero_count[3*4], &h->mb.cache.non_zero_count[x264_scan8[0]+3*8] );
1322 M16( &non_zero_count[16+0*2] ) = M32( &h->mb.cache.non_zero_count[x264_scan8[16+0*2]-1] ) >> 8;
1323 M16( &non_zero_count[16+1*2] ) = M32( &h->mb.cache.non_zero_count[x264_scan8[16+1*2]-1] ) >> 8;
1324 M16( &non_zero_count[16+2*2] ) = M32( &h->mb.cache.non_zero_count[x264_scan8[16+2*2]-1] ) >> 8;
1325 M16( &non_zero_count[16+3*2] ) = M32( &h->mb.cache.non_zero_count[x264_scan8[16+3*2]-1] ) >> 8;
1327 if( h->mb.i_type != I_16x16 && h->mb.i_cbp_luma == 0 && h->mb.i_cbp_chroma == 0 )
1328 h->mb.i_qp = h->mb.i_last_qp;
1329 h->mb.qp[i_mb_xy] = h->mb.i_qp;
1330 h->mb.i_last_dqp = h->mb.i_qp - h->mb.i_last_qp;
1331 h->mb.i_last_qp = h->mb.i_qp;
1334 if( h->mb.i_cbp_luma == 0 && h->mb.i_type != I_8x8 )
1335 h->mb.b_transform_8x8 = 0;
1336 h->mb.mb_transform_size[i_mb_xy] = h->mb.b_transform_8x8;
1338 if( h->sh.i_type != SLICE_TYPE_I )
1340 if( !IS_INTRA( i_mb_type ) )
1342 h->mb.ref[0][i_mb_8x8+0+0*s8x8] = h->mb.cache.ref[0][x264_scan8[0]];
1343 h->mb.ref[0][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[0][x264_scan8[4]];
1344 h->mb.ref[0][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[0][x264_scan8[8]];
1345 h->mb.ref[0][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[0][x264_scan8[12]];
1346 for( int y = 0; y < 4; y++ )
1348 CP64( h->mb.mv[0][i_mb_4x4+y*s4x4+0], h->mb.cache.mv[0][x264_scan8[0]+8*y+0] );
1349 CP64( h->mb.mv[0][i_mb_4x4+y*s4x4+2], h->mb.cache.mv[0][x264_scan8[0]+8*y+2] );
1351 if( h->sh.i_type == SLICE_TYPE_B )
1353 h->mb.ref[1][i_mb_8x8+0+0*s8x8] = h->mb.cache.ref[1][x264_scan8[0]];
1354 h->mb.ref[1][i_mb_8x8+1+0*s8x8] = h->mb.cache.ref[1][x264_scan8[4]];
1355 h->mb.ref[1][i_mb_8x8+0+1*s8x8] = h->mb.cache.ref[1][x264_scan8[8]];
1356 h->mb.ref[1][i_mb_8x8+1+1*s8x8] = h->mb.cache.ref[1][x264_scan8[12]];
1357 for( int y = 0; y < 4; y++ )
1359 CP64( h->mb.mv[1][i_mb_4x4+y*s4x4+0], h->mb.cache.mv[1][x264_scan8[0]+8*y+0] );
1360 CP64( h->mb.mv[1][i_mb_4x4+y*s4x4+2], h->mb.cache.mv[1][x264_scan8[0]+8*y+2] );
1366 for( int i_list = 0; i_list < (h->sh.i_type == SLICE_TYPE_B ? 2 : 1 ); i_list++ )
1368 M16( &h->mb.ref[i_list][i_mb_8x8+0*s8x8] ) = (uint8_t)(-1) * 0x0101;
1369 M16( &h->mb.ref[i_list][i_mb_8x8+1*s8x8] ) = (uint8_t)(-1) * 0x0101;
1370 for( int y = 0; y < 4; y++ )
1372 M64( h->mb.mv[i_list][i_mb_4x4+y*s4x4+0] ) = 0;
1373 M64( h->mb.mv[i_list][i_mb_4x4+y*s4x4+2] ) = 0;
1379 if( h->param.b_cabac )
1381 if( IS_INTRA(i_mb_type) && i_mb_type != I_PCM )
1382 h->mb.chroma_pred_mode[i_mb_xy] = x264_mb_pred_mode8x8c_fix[ h->mb.i_chroma_pred_mode ];
1384 h->mb.chroma_pred_mode[i_mb_xy] = I_PRED_CHROMA_DC;
1386 if( !IS_INTRA( i_mb_type ) && !IS_SKIP( i_mb_type ) && !IS_DIRECT( i_mb_type ) )
1388 CP64( h->mb.mvd[0][i_mb_xy][0], h->mb.cache.mvd[0][x264_scan8[10]] );
1389 CP16( h->mb.mvd[0][i_mb_xy][4], h->mb.cache.mvd[0][x264_scan8[5 ]] );
1390 CP16( h->mb.mvd[0][i_mb_xy][5], h->mb.cache.mvd[0][x264_scan8[7 ]] );
1391 CP16( h->mb.mvd[0][i_mb_xy][6], h->mb.cache.mvd[0][x264_scan8[13]] );
1392 if( h->sh.i_type == SLICE_TYPE_B )
1394 CP64( h->mb.mvd[1][i_mb_xy][0], h->mb.cache.mvd[1][x264_scan8[10]] );
1395 CP16( h->mb.mvd[1][i_mb_xy][4], h->mb.cache.mvd[1][x264_scan8[5 ]] );
1396 CP16( h->mb.mvd[1][i_mb_xy][5], h->mb.cache.mvd[1][x264_scan8[7 ]] );
1397 CP16( h->mb.mvd[1][i_mb_xy][6], h->mb.cache.mvd[1][x264_scan8[13]] );
1402 M64( h->mb.mvd[0][i_mb_xy][0] ) = 0;
1403 M64( h->mb.mvd[0][i_mb_xy][4] ) = 0;
1404 if( h->sh.i_type == SLICE_TYPE_B )
1406 M64( h->mb.mvd[1][i_mb_xy][0] ) = 0;
1407 M64( h->mb.mvd[1][i_mb_xy][4] ) = 0;
1411 if( h->sh.i_type == SLICE_TYPE_B )
1413 if( i_mb_type == B_SKIP || i_mb_type == B_DIRECT )
1414 h->mb.skipbp[i_mb_xy] = 0xf;
1415 else if( i_mb_type == B_8x8 )
1417 int skipbp = ( h->mb.i_sub_partition[0] == D_DIRECT_8x8 ) << 0;
1418 skipbp |= ( h->mb.i_sub_partition[1] == D_DIRECT_8x8 ) << 1;
1419 skipbp |= ( h->mb.i_sub_partition[2] == D_DIRECT_8x8 ) << 2;
1420 skipbp |= ( h->mb.i_sub_partition[3] == D_DIRECT_8x8 ) << 3;
1421 h->mb.skipbp[i_mb_xy] = skipbp;
1424 h->mb.skipbp[i_mb_xy] = 0;
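/* Precompute dist_scale_factor and the implicit bipred weights for every
 * (ref0, ref1) pair, per the H.264 spec: tx = (16384 + |td|/2) / td,
 * dist_scale_factor = clip3( -1024, 1023, (tb*tx + 32) >> 6 ); the bipred
 * weight is 64 - (dist_scale_factor >> 2) when weighted bipred is enabled and
 * the factor is in the supported range, else 32 (plain average).
 * e.g. a B-frame halfway between its references (tb = 2, td = 4) gives
 * tx = 4096, dist_scale_factor = 128 and thus equal 32/32 weighting. */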
1430 void x264_macroblock_bipred_init( x264_t *h )
1432 for( int field = 0; field <= h->sh.b_mbaff; field++ )
1433 for( int i_ref0 = 0; i_ref0 < (h->i_ref0<<h->sh.b_mbaff); i_ref0++ )
1435 int poc0 = h->fref0[i_ref0>>h->sh.b_mbaff]->i_poc;
1436 if( h->sh.b_mbaff && field^(i_ref0&1) )
1437 poc0 += h->sh.i_delta_poc_bottom;
1438 for( int i_ref1 = 0; i_ref1 < (h->i_ref1<<h->sh.b_mbaff); i_ref1++ )
1440 int dist_scale_factor;
1441 int poc1 = h->fref1[i_ref1>>h->sh.b_mbaff]->i_poc;
1442 if( h->sh.b_mbaff && field^(i_ref1&1) )
1443 poc1 += h->sh.i_delta_poc_bottom;
1444 int cur_poc = h->fdec->i_poc + field*h->sh.i_delta_poc_bottom;
1445 int td = x264_clip3( poc1 - poc0, -128, 127 );
1446 if( td == 0 /* || pic0 is a long-term ref */ )
1447 dist_scale_factor = 256;
1450 int tb = x264_clip3( cur_poc - poc0, -128, 127 );
1451 int tx = (16384 + (abs(td) >> 1)) / td;
1452 dist_scale_factor = x264_clip3( (tb * tx + 32) >> 6, -1024, 1023 );
1455 h->mb.dist_scale_factor_buf[field][i_ref0][i_ref1] = dist_scale_factor;
1457 dist_scale_factor >>= 2;
1458 if( h->param.analyse.b_weighted_bipred
1459 && dist_scale_factor >= -64
1460 && dist_scale_factor <= 128 )
1462 h->mb.bipred_weight_buf[field][i_ref0][i_ref1] = 64 - dist_scale_factor;
1463 // ssse3 implementation of biweight doesn't support the extrema.
1464 // if we ever generate them, we'll have to drop that optimization.
1465 assert( dist_scale_factor >= -63 && dist_scale_factor <= 127 );
1468 h->mb.bipred_weight_buf[field][i_ref0][i_ref1] = 32;