X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=encoder%2Fme.c;h=b5552fb40ade4183bdb198c6df4e2d19694af405;hb=867c1eca89477f4022b447207da3013acb538d57;hp=3a86370e5688cdf325ed3b341eb5d17bb23118a8;hpb=2ec694181f8ba3eb1c4153e6b955d399d6448c25;p=x264

diff --git a/encoder/me.c b/encoder/me.c
index 3a86370e..b5552fb4 100644
--- a/encoder/me.c
+++ b/encoder/me.c
@@ -1,7 +1,7 @@
 /*****************************************************************************
  * me.c: motion estimation
  *****************************************************************************
- * Copyright (C) 2003-2012 x264 project
+ * Copyright (C) 2003-2016 x264 project
  *
  * Authors: Loren Merritt
  *          Laurent Aimar
@@ -61,21 +61,22 @@ static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_ite
     (p_cost_mvx[(mx)<<2] + p_cost_mvy[(my)<<2])
 
 #define COST_MV( mx, my )\
+do\
 {\
     int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE,\
                    &p_fref_w[(my)*stride+(mx)], stride )\
              + BITS_MVD(mx,my);\
     COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
-}
+} while(0)
 
-#define COST_MV_HPEL( mx, my ) \
-{ \
-    intptr_t stride2 = 16; \
-    pixel *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] ); \
-    int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 ) \
-             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
-    COPY3_IF_LT( bpred_cost, cost, bpred_mx, mx, bpred_my, my ); \
-}
+#define COST_MV_HPEL( mx, my, cost )\
+do\
+{\
+    intptr_t stride2 = 16;\
+    pixel *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] );\
+    cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 )\
+         + p_cost_mvx[ mx ] + p_cost_mvy[ my ];\
+} while(0)
 
 #define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
 {\
@@ -174,6 +175,10 @@ static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_ite
     }\
 }
 
+#define FPEL(mv) (((mv)+2)>>2) /* Convert subpel MV to fullpel with rounding... */
+#define SPEL(mv) ((mv)<<2)     /* ... and the reverse. */
+#define SPELx2(mv) (SPEL(mv)&0xFFFCFFFC) /* for two packed MVs */
+
 void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
 {
     const int bw = x264_pixel_size[m->i_pixel].w;
@@ -181,97 +186,136 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
     const int i_pixel = m->i_pixel;
     const int stride = m->i_stride[0];
     int i_me_range = h->param.analyse.i_me_range;
-    int bmx, bmy, bcost;
-    int bpred_mx = 0, bpred_my = 0, bpred_cost = COST_MAX;
+    int bmx, bmy, bcost = COST_MAX;
+    int bpred_cost = COST_MAX;
     int omx, omy, pmx, pmy;
     pixel *p_fenc = m->p_fenc[0];
     pixel *p_fref_w = m->p_fref_w;
-    ALIGNED_ARRAY_16( pixel, pix,[16*16] );
-
-    int costs[16];
-
-    int mv_x_min = h->mb.mv_min_fpel[0];
-    int mv_y_min = h->mb.mv_min_fpel[1];
-    int mv_x_max = h->mb.mv_max_fpel[0];
-    int mv_y_max = h->mb.mv_max_fpel[1];
-    int mv_x_min_qpel = mv_x_min << 2;
-    int mv_y_min_qpel = mv_y_min << 2;
-    int mv_x_max_qpel = mv_x_max << 2;
-    int mv_y_max_qpel = mv_y_max << 2;
+    ALIGNED_ARRAY_N( pixel, pix,[16*16] );
+    ALIGNED_ARRAY_8( int16_t, mvc_temp,[16],[2] );
+
+    ALIGNED_ARRAY_16( int, costs,[16] );
+
+    int mv_x_min = h->mb.mv_limit_fpel[0][0];
+    int mv_y_min = h->mb.mv_limit_fpel[0][1];
+    int mv_x_max = h->mb.mv_limit_fpel[1][0];
+    int mv_y_max = h->mb.mv_limit_fpel[1][1];
 /* Special version of pack to allow shortcuts in CHECK_MVRANGE */
 #define pack16to32_mask2(mx,my) ((mx<<16)|(my&0x7FFF))
     uint32_t mv_min = pack16to32_mask2( -mv_x_min, -mv_y_min );
     uint32_t mv_max = pack16to32_mask2( mv_x_max, mv_y_max )|0x8000;
+    uint32_t pmv, bpred_mv = 0;
 
 #define CHECK_MVRANGE(mx,my) (!(((pack16to32_mask2(mx,my) + mv_min) | (mv_max - pack16to32_mask2(mx,my))) & 0x80004000))
 
     const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
     const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
 
-    uint32_t pmv;
-    bmx = x264_clip3( m->mvp[0], mv_x_min_qpel, mv_x_max_qpel );
-    bmy = x264_clip3( m->mvp[1], mv_y_min_qpel, mv_y_max_qpel );
-    pmx = ( bmx + 2 ) >> 2;
-    pmy = ( bmy + 2 ) >> 2;
-    bcost = COST_MAX;
-
-    /* try extra predictors if provided */
+    /* Try extra predictors if provided. If subme >= 3, check subpel predictors,
+     * otherwise round them to fullpel. */
    if( h->mb.i_subpel_refine >= 3 )
     {
-        pmv = pack16to32_mask(bmx,bmy);
-        if( i_mvc )
-            COST_MV_HPEL( bmx, bmy );
-        for( int i = 0; i < i_mvc; i++ )
+        /* Calculate and check the MVP first */
+        int bpred_mx = x264_clip3( m->mvp[0], SPEL(mv_x_min), SPEL(mv_x_max) );
+        int bpred_my = x264_clip3( m->mvp[1], SPEL(mv_y_min), SPEL(mv_y_max) );
+        pmv = pack16to32_mask( bpred_mx, bpred_my );
+        pmx = FPEL( bpred_mx );
+        pmy = FPEL( bpred_my );
+
+        COST_MV_HPEL( bpred_mx, bpred_my, bpred_cost );
+        int pmv_cost = bpred_cost;
+
+        if( i_mvc > 0 )
         {
-            if( M32( mvc[i] ) && (pmv != M32( mvc[i] )) )
+            /* Clip MV candidates and eliminate those equal to zero and pmv. */
+            int valid_mvcs = x264_predictor_clip( mvc_temp+2, mvc, i_mvc, h->mb.mv_limit_fpel, pmv );
+            if( valid_mvcs > 0 )
             {
-                int mx = x264_clip3( mvc[i][0], mv_x_min_qpel, mv_x_max_qpel );
-                int my = x264_clip3( mvc[i][1], mv_y_min_qpel, mv_y_max_qpel );
-                COST_MV_HPEL( mx, my );
+                int i = 1, cost;
+                /* We stuff pmv here to branchlessly pick between pmv and the various
+                 * MV candidates. [0] gets skipped in order to maintain alignment for
+                 * x264_predictor_clip. */
+                M32( mvc_temp[1] ) = pmv;
+                bpred_cost <<= 4;
+                do
+                {
+                    int mx = mvc_temp[i+1][0];
+                    int my = mvc_temp[i+1][1];
+                    COST_MV_HPEL( mx, my, cost );
+                    COPY1_IF_LT( bpred_cost, (cost << 4) + i );
+                } while( ++i <= valid_mvcs );
+                bpred_mx = mvc_temp[(bpred_cost&15)+1][0];
+                bpred_my = mvc_temp[(bpred_cost&15)+1][1];
+                bpred_cost >>= 4;
             }
         }
-        bmx = ( bpred_mx + 2 ) >> 2;
-        bmy = ( bpred_my + 2 ) >> 2;
-        COST_MV( bmx, bmy );
+
+        /* Round the best predictor back to fullpel and get the cost, since this is where
+         * we'll be starting the fullpel motion search. */
+        bmx = FPEL( bpred_mx );
+        bmy = FPEL( bpred_my );
+        bpred_mv = pack16to32_mask(bpred_mx, bpred_my);
+        if( bpred_mv&0x00030003 ) /* Only test if the tested predictor is actually subpel... */
+            COST_MV( bmx, bmy );
+        else /* Otherwise just copy the cost (we already know it) */
+            bcost = bpred_cost;
+
+        /* Test the zero vector if it hasn't been tested yet. */
+        if( pmv )
+        {
+            if( bmx|bmy ) COST_MV( 0, 0 );
+        }
+        /* If a subpel mv candidate was better than the zero vector, the previous
+         * fullpel check won't have gotten it even if the pmv was zero. So handle
+         * that possibility here. */
+        else
+        {
+            COPY3_IF_LT( bcost, pmv_cost, bmx, 0, bmy, 0 );
+        }
     }
     else
     {
-        /* check the MVP */
-        bmx = pmx;
-        bmy = pmy;
+        /* Calculate and check the fullpel MVP first */
+        bmx = pmx = x264_clip3( FPEL(m->mvp[0]), mv_x_min, mv_x_max );
+        bmy = pmy = x264_clip3( FPEL(m->mvp[1]), mv_y_min, mv_y_max );
+        pmv = pack16to32_mask( bmx, bmy );
+
         /* Because we are rounding the predicted motion vector to fullpel, there will be
          * an extra MV cost in 15 out of 16 cases. However, when the predicted MV is
          * chosen as the best predictor, it is often the case that the subpel search will
-         * result in a vector at or next to the predicted motion vector. Therefore, it is
-         * sensible to omit the cost of the MV from the rounded MVP to avoid unfairly
-         * biasing against use of the predicted motion vector. */
+         * result in a vector at or next to the predicted motion vector. Therefore, we omit
+         * the cost of the MV from the rounded MVP to avoid unfairly biasing against use of
+         * the predicted motion vector.
+         *
+         * Disclaimer: this is a post-hoc rationalization for why this hack works. */
         bcost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[bmy*stride+bmx], stride );
-        pmv = pack16to32_mask( bmx, bmy );
+
         if( i_mvc > 0 )
         {
-            ALIGNED_ARRAY_8( int16_t, mvc_fpel,[16],[2] );
-            x264_predictor_roundclip( mvc_fpel+2, mvc, i_mvc, mv_x_min, mv_x_max, mv_y_min, mv_y_max );
-            M32( mvc_fpel[1] ) = pmv;
-            bcost <<= 4;
-            for( int i = 1; i <= i_mvc; i++ )
+            /* Like in subme>=3, except we also round the candidates to fullpel. */
+            int valid_mvcs = x264_predictor_roundclip( mvc_temp+2, mvc, i_mvc, h->mb.mv_limit_fpel, pmv );
+            if( valid_mvcs > 0 )
             {
-                if( M32( mvc_fpel[i+1] ) && (pmv != M32( mvc_fpel[i+1] )) )
+                int i = 1, cost;
+                M32( mvc_temp[1] ) = pmv;
+                bcost <<= 4;
+                do
                 {
-                    int mx = mvc_fpel[i+1][0];
-                    int my = mvc_fpel[i+1][1];
-                    int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[my*stride+mx], stride ) + BITS_MVD( mx, my );
-                    cost = (cost << 4) + i;
-                    COPY1_IF_LT( bcost, cost );
-                }
+                    int mx = mvc_temp[i+1][0];
+                    int my = mvc_temp[i+1][1];
+                    cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[my*stride+mx], stride ) + BITS_MVD( mx, my );
+                    COPY1_IF_LT( bcost, (cost << 4) + i );
+                } while( ++i <= valid_mvcs );
+                bmx = mvc_temp[(bcost&15)+1][0];
+                bmy = mvc_temp[(bcost&15)+1][1];
+                bcost >>= 4;
             }
-            bmx = mvc_fpel[(bcost&15)+1][0];
-            bmy = mvc_fpel[(bcost&15)+1][1];
-            bcost >>= 4;
         }
-    }
 
-    if( pmv )
-        COST_MV( 0, 0 );
+        /* Same as above, except the condition is simpler. */
+        if( pmv )
+            COST_MV( 0, 0 );
+    }
 
     switch( h->mb.i_me_method )
     {
@@ -321,14 +365,14 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
 
             /* hexagon */
             COST_MV_X3_DIR( -2,0, -1, 2,  1, 2, costs   );
-            COST_MV_X3_DIR(  2,0,  1,-2, -1,-2, costs+3 );
+            COST_MV_X3_DIR(  2,0,  1,-2, -1,-2, costs+4 ); /* +4 for 16-byte alignment */
             bcost <<= 3;
             COPY1_IF_LT( bcost, (costs[0]<<3)+2 );
             COPY1_IF_LT( bcost, (costs[1]<<3)+3 );
             COPY1_IF_LT( bcost, (costs[2]<<3)+4 );
-            COPY1_IF_LT( bcost, (costs[3]<<3)+5 );
-            COPY1_IF_LT( bcost, (costs[4]<<3)+6 );
-            COPY1_IF_LT( bcost, (costs[5]<<3)+7 );
+            COPY1_IF_LT( bcost, (costs[4]<<3)+5 );
+            COPY1_IF_LT( bcost, (costs[5]<<3)+6 );
+            COPY1_IF_LT( bcost, (costs[6]<<3)+7 );
 
             if( bcost&7 )
             {
@@ -358,19 +402,20 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
             bcost >>= 3;
 #endif
             /* square refine */
-            int dir = 0;
+            bcost <<= 4;
             COST_MV_X4_DIR(  0,-1,  0,1, -1,0, 1,0, costs );
-            COPY2_IF_LT( bcost, costs[0], dir, 1 );
-            COPY2_IF_LT( bcost, costs[1], dir, 2 );
-            COPY2_IF_LT( bcost, costs[2], dir, 3 );
-            COPY2_IF_LT( bcost, costs[3], dir, 4 );
+            COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
+            COPY1_IF_LT( bcost, (costs[1]<<4)+2 );
+            COPY1_IF_LT( bcost, (costs[2]<<4)+3 );
+            COPY1_IF_LT( bcost, (costs[3]<<4)+4 );
             COST_MV_X4_DIR( -1,-1, -1,1, 1,-1, 1,1, costs );
-            COPY2_IF_LT( bcost, costs[0], dir, 5 );
-            COPY2_IF_LT( bcost, costs[1], dir, 6 );
-            COPY2_IF_LT( bcost, costs[2], dir, 7 );
-            COPY2_IF_LT( bcost, costs[3], dir, 8 );
-            bmx += square1[dir][0];
-            bmy += square1[dir][1];
+            COPY1_IF_LT( bcost, (costs[0]<<4)+5 );
+            COPY1_IF_LT( bcost, (costs[1]<<4)+6 );
+            COPY1_IF_LT( bcost, (costs[2]<<4)+7 );
+            COPY1_IF_LT( bcost, (costs[3]<<4)+8 );
+            bmx += square1[bcost&15][0];
+            bmy += square1[bcost&15][1];
+            bcost >>= 4;
             break;
         }
 
@@ -609,7 +654,7 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
         if( h->mb.i_me_method == X264_ME_TESA )
         {
            // ADS threshold, then SAD threshold, then keep the best few SADs, then SATD
-            mvsad_t *mvsads = (mvsad_t *)(xs + ((width+15)&~15) + 4);
+            mvsad_t *mvsads = (mvsad_t *)(xs + ((width+31)&~31) + 4);
             int nmvsad = 0, limit;
             int sad_thresh = i_me_range <= 16 ? 10 : i_me_range <= 24 ? 11 : 12;
             int bsad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+bmy*stride+bmx, stride )
@@ -626,7 +671,7 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
                    for( i = 0; i < xn-2; i += 3 )
                    {
                        pixel *ref = p_fref_w+min_x+my*stride;
-                        int sads[3];
+                        ALIGNED_ARRAY_16( int, sads,[4] ); /* padded to [4] for asm */
                        h->pixf.sad_x3[i_pixel]( p_fenc, ref+xs[i], ref+xs[i+1], ref+xs[i+2], stride, sads );
                        for( int j = 0; j < 3; j++ )
                        {
@@ -662,10 +707,11 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
                sad_thresh = bsad*sad_thresh>>3;
                while( nmvsad > limit*2 && sad_thresh > bsad )
                {
-                    int i;
+                    int i = 0;
                    // halve the range if the domain is too large... eh, close enough
                    sad_thresh = (sad_thresh + bsad) >> 1;
-                    for( i = 0; i < nmvsad && mvsads[i].sad <= sad_thresh; i++ );
+                    while( i < nmvsad && mvsads[i].sad <= sad_thresh )
+                        i++;
                    for( int j = i; j < nmvsad; j++ )
                    {
                        uint32_t sad;
@@ -727,24 +773,22 @@ void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc,
     }
 
     /* -> qpel mv */
-    if( bpred_cost < bcost )
+    uint32_t bmv = pack16to32_mask(bmx,bmy);
+    uint32_t bmv_spel = SPELx2(bmv);
+    if( h->mb.i_subpel_refine < 3 )
     {
-        m->mv[0] = bpred_mx;
-        m->mv[1] = bpred_my;
-        m->cost = bpred_cost;
+        m->cost_mv = p_cost_mvx[bmx<<2] + p_cost_mvy[bmy<<2];
+        m->cost = bcost;
+        /* compute the real cost */
+        if( bmv == pmv ) m->cost += m->cost_mv;
+        M32( m->mv ) = bmv_spel;
     }
     else
     {
-        m->mv[0] = bmx << 2;
-        m->mv[1] = bmy << 2;
-        m->cost = bcost;
+        M32(m->mv) = bpred_cost < bcost ? bpred_mv : bmv_spel;
+        m->cost = X264_MIN( bpred_cost, bcost );
     }
 
-    /* compute the real cost */
-    m->cost_mv = p_cost_mvx[ m->mv[0] ] + p_cost_mvy[ m->mv[1] ];
-    if( bmx == pmx && bmy == pmy && h->mb.i_subpel_refine < 3 )
-        m->cost += m->cost_mv;
-
     /* subpel refine */
     if( h->mb.i_subpel_refine >= 2 )
     {
@@ -831,40 +875,52 @@ static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_ite
     int chroma_v_shift = CHROMA_V_SHIFT;
     int mvy_offset = chroma_v_shift & MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
 
-    ALIGNED_ARRAY_16( pixel, pix,[64*18] ); // really 17x17x2, but round up for alignment
+    ALIGNED_ARRAY_N( pixel, pix,[64*18] ); // really 17x17x2, but round up for alignment
+    ALIGNED_ARRAY_16( int, costs,[4] );
 
     int bmx = m->mv[0];
     int bmy = m->mv[1];
     int bcost = m->cost;
     int odir = -1, bdir;
 
-    /* try the subpel component of the predicted mv */
-    if( hpel_iters && h->mb.i_subpel_refine < 3 )
-    {
-        int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0]+2, h->mb.mv_max_spel[0]-2 );
-        int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1]+2, h->mb.mv_max_spel[1]-2 );
-        if( (mx-bmx)|(my-bmy) )
-            COST_MV_SAD( mx, my );
-    }
-
     /* halfpel diamond search */
-    for( int i = hpel_iters; i > 0; i-- )
+    if( hpel_iters )
     {
-        int omx = bmx, omy = bmy;
-        int costs[4];
-        intptr_t stride = 64; // candidates are either all hpel or all qpel, so one stride is enough
-        pixel *src0, *src1, *src2, *src3;
-        src0 = h->mc.get_ref( pix,    &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1, &m->weight[0] );
-        src2 = h->mc.get_ref( pix+32, &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh, &m->weight[0] );
-        src1 = src0 + stride;
-        src3 = src2 + 1;
-        h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
-        COPY2_IF_LT( bcost, costs[0] + p_cost_mvx[omx  ] + p_cost_mvy[omy-2], bmy, omy-2 );
-        COPY2_IF_LT( bcost, costs[1] + p_cost_mvx[omx  ] + p_cost_mvy[omy+2], bmy, omy+2 );
-        COPY3_IF_LT( bcost, costs[2] + p_cost_mvx[omx-2] + p_cost_mvy[omy  ], bmx, omx-2, bmy, omy );
-        COPY3_IF_LT( bcost, costs[3] + p_cost_mvx[omx+2] + p_cost_mvy[omy  ], bmx, omx+2, bmy, omy );
-        if( (bmx == omx) & (bmy == omy) )
-            break;
+        /* try the subpel component of the predicted mv */
+        if( h->mb.i_subpel_refine < 3 )
+        {
+            int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0]+2, h->mb.mv_max_spel[0]-2 );
+            int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1]+2, h->mb.mv_max_spel[1]-2 );
+            if( (mx-bmx)|(my-bmy) )
+                COST_MV_SAD( mx, my );
+        }
+
+        bcost <<= 6;
+        for( int i = hpel_iters; i > 0; i-- )
+        {
+            int omx = bmx, omy = bmy;
+            intptr_t stride = 64; // candidates are either all hpel or all qpel, so one stride is enough
+            pixel *src0, *src1, *src2, *src3;
+            src0 = h->mc.get_ref( pix,    &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1, &m->weight[0] );
+            src2 = h->mc.get_ref( pix+32, &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh, &m->weight[0] );
+            src1 = src0 + stride;
+            src3 = src2 + 1;
+            h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
+            costs[0] += p_cost_mvx[omx  ] + p_cost_mvy[omy-2];
+            costs[1] += p_cost_mvx[omx  ] + p_cost_mvy[omy+2];
+            costs[2] += p_cost_mvx[omx-2] + p_cost_mvy[omy  ];
+            costs[3] += p_cost_mvx[omx+2] + p_cost_mvy[omy  ];
+            COPY1_IF_LT( bcost, (costs[0]<<6)+2 );
+            COPY1_IF_LT( bcost, (costs[1]<<6)+6 );
+            COPY1_IF_LT( bcost, (costs[2]<<6)+16 );
+            COPY1_IF_LT( bcost, (costs[3]<<6)+48 );
+            if( !(bcost&63) )
+                break;
+            bmx -= (bcost<<26)>>29;
+            bmy -= (bcost<<29)>>29;
+            bcost &= ~63;
+        }
+        bcost >>= 6;
     }
 
     if( !b_refine_qpel && (h->pixf.mbcmp_unaligned[0] != h->pixf.fpelcmp[0] || b_chroma_me) )
@@ -909,7 +965,6 @@ static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_ite
     /* Special simplified case for subme=1 */
     else if( bmy > h->mb.mv_min_spel[1] && bmy < h->mb.mv_max_spel[1] && bmx > h->mb.mv_min_spel[0] && bmx < h->mb.mv_max_spel[0] )
     {
-        int costs[4];
         int omx = bmx, omy = bmy;
         /* We have to use mc_luma because all strides must be the same to use fpelcmp_x4 */
         h->mc.mc_luma( pix   , 64, m->p_fref, m->i_stride[0], omx, omy-1, bw, bh, &m->weight[0] );
@@ -917,10 +972,18 @@ static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_ite
        h->mc.mc_luma( pix+32, 64, m->p_fref, m->i_stride[0], omx-1, omy, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+48, 64, m->p_fref, m->i_stride[0], omx+1, omy, bw, bh, &m->weight[0] );
        h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], pix, pix+16, pix+32, pix+48, 64, costs );
-        COPY2_IF_LT( bcost, costs[0] + p_cost_mvx[omx  ] + p_cost_mvy[omy-1], bmy, omy-1 );
-        COPY2_IF_LT( bcost, costs[1] + p_cost_mvx[omx  ] + p_cost_mvy[omy+1], bmy, omy+1 );
-        COPY3_IF_LT( bcost, costs[2] + p_cost_mvx[omx-1] + p_cost_mvy[omy  ], bmx, omx-1, bmy, omy );
-        COPY3_IF_LT( bcost, costs[3] + p_cost_mvx[omx+1] + p_cost_mvy[omy  ], bmx, omx+1, bmy, omy );
+        costs[0] += p_cost_mvx[omx  ] + p_cost_mvy[omy-1];
+        costs[1] += p_cost_mvx[omx  ] + p_cost_mvy[omy+1];
+        costs[2] += p_cost_mvx[omx-1] + p_cost_mvy[omy  ];
+        costs[3] += p_cost_mvx[omx+1] + p_cost_mvy[omy  ];
+        bcost <<= 4;
+        COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
+        COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
+        COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
+        COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
+        bmx -= (bcost<<28)>>30;
+        bmy -= (bcost<<30)>>30;
+        bcost >>= 4;
     }
 
     m->cost = bcost;
@@ -971,9 +1034,9 @@ static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_m
     const int i_pixel = m0->i_pixel;
     const int bw = x264_pixel_size[i_pixel].w;
     const int bh = x264_pixel_size[i_pixel].h;
-    ALIGNED_ARRAY_16( pixel, pixy_buf,[2],[9][16*16] );
-    ALIGNED_ARRAY_16( pixel, pixu_buf,[2],[9][16*16] );
-    ALIGNED_ARRAY_16( pixel, pixv_buf,[2],[9][16*16] );
+    ALIGNED_ARRAY_N( pixel, pixy_buf,[2],[9][16*16] );
+    ALIGNED_ARRAY_N( pixel, pixu_buf,[2],[9][16*16] );
+    ALIGNED_ARRAY_N( pixel, pixv_buf,[2],[9][16*16] );
     pixel *src[3][2][9];
     int chromapix = h->luma2chroma_pixel[i_pixel];
     int chroma_v_shift = CHROMA_V_SHIFT;
@@ -996,7 +1059,7 @@ static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_m
     uint64_t bcostrd = COST_MAX64;
     uint16_t amvd;
     /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
-    ALIGNED_ARRAY_16( uint8_t, visited,[8],[8][8] );
+    ALIGNED_ARRAY_N( uint8_t, visited,[8],[8][8] );
     /* all permutations of an offset in up to 2 of the dimensions */
     ALIGNED_4( static const int8_t dia4d[33][4] ) =
     {
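
A note on the idiom that recurs through this patch: almost every hunk replaces COPY2_IF_LT/COPY3_IF_LT bookkeeping with a single COPY1_IF_LT over a cost whose low bits carry the candidate's identity, so one integer compare tracks both the minimum and the argmin. The sketch below is not x264 code; it only mirrors the COPY1_IF_LT macro, and the candidate costs are invented. The trick is valid as long as the shifted costs cannot overflow and the tag fits in the vacated low bits; on ties, strict less-than keeps the earliest candidate.

    #include <stdio.h>

    /* In the spirit of x264's COPY1_IF_LT, wrapped in do/while(0) just as
     * the patch does for COST_MV / COST_MV_HPEL. */
    #define COPY1_IF_LT(x,y) do { if( (y) < (x) ) (x) = (y); } while(0)

    int main( void )
    {
        /* Hypothetical costs for four MV candidates. */
        int costs[4] = { 900, 750, 760, 810 };
        /* Current best cost, tagged with candidate id 0 ("keep the old MV"). */
        int bcost = (1000 << 4) + 0;

        /* Tag each candidate with id i+1 in the low 4 bits; one compare then
         * tracks the minimum cost and the argmin at the same time. */
        for( int i = 0; i < 4; i++ )
            COPY1_IF_LT( bcost, (costs[i] << 4) + i + 1 );

        /* Unpack: the low 4 bits name the winner, the rest is its cost. */
        printf( "winner %d, cost %d\n", bcost & 15, bcost >> 4 ); /* winner 2, cost 750 */
        return 0;
    }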
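The halfpel diamond and the subme=1 hunks go a step further: instead of an index, the low bits hold the winning step's (x,y) offset as two small two's-complement fields (3 bits each under the <<6 scheme, 2 bits each under <<4), decoded by shifting the field up to the top of the word and arithmetic-shifting it back down. Below is a sketch of the <<6 decoding; it assumes arithmetic right shift of negative values, which is implementation-defined in ISO C but holds on the compilers x264 targets.

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        /* The four diamond steps and their 6-bit codes from the patch:
         * bits [5:3] = x offset, bits [2:0] = y offset, both signed 3-bit.
         * (0,-2) -> 2, (0,+2) -> 6, (-2,0) -> 16, (+2,0) -> 48. */
        int codes[4] = { 2, 6, 16, 48 };

        for( int i = 0; i < 4; i++ )
        {
            int bcost = codes[i]; /* pretend these are the low 6 bits of bcost */
            int bmx = 0, bmy = 0; /* pretend the iteration was centred on (0,0) */
            /* Sign-extend each 3-bit field: shift it up to bits [31:29],
             * then arithmetic-shift back down. */
            bmx -= (int32_t)((uint32_t)bcost << 26) >> 29;
            bmy -= (int32_t)((uint32_t)bcost << 29) >> 29;
            printf( "code %2d -> step (%d,%d)\n", codes[i], bmx, bmy );
        }
        return 0;
    }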
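CHECK_MVRANGE is also worth unpacking, since it validates both components of a fullpel MV with one branch. As we read it: adding the negated minima drives an under-minimum component negative (bit 31 for x; bit 14 for y, via a borrow out of its 15-bit field), and subtracting from the maxima, with the 0x8000 sentinel guarding the y field, flags over-maximum components the same way, so a single mask catches all four violations. A standalone demo with made-up limits and probe vectors follows; the two macros are copied from the patch, with a cast added so shifting a negative x stays well defined.

    #include <stdint.h>
    #include <stdio.h>

    #define pack16to32_mask2(mx,my) (((uint32_t)(mx)<<16)|((my)&0x7FFF))

    int main( void )
    {
        int mv_x_min = -24, mv_y_min = -24, mv_x_max = 24, mv_y_max = 24;
        uint32_t mv_min = pack16to32_mask2( -mv_x_min, -mv_y_min );
        uint32_t mv_max = pack16to32_mask2( mv_x_max, mv_y_max )|0x8000;

    #define CHECK_MVRANGE(mx,my) (!(((pack16to32_mask2(mx,my) + mv_min) |\
                                     (mv_max - pack16to32_mask2(mx,my))) & 0x80004000))

        printf( "%d\n", CHECK_MVRANGE(  10,  10 ) ); /* 1: both in range   */
        printf( "%d\n", CHECK_MVRANGE(  30,   0 ) ); /* 0: x over maximum  */
        printf( "%d\n", CHECK_MVRANGE( -30,   0 ) ); /* 0: x under minimum */
        printf( "%d\n", CHECK_MVRANGE(   0, -30 ) ); /* 0: y under minimum */
        printf( "%d\n", CHECK_MVRANGE(   0,  30 ) ); /* 0: y over maximum  */
        return 0;
    }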