+/* Interpolate and cache the list `list` luma prediction at (dx,dy) qpel from the current
+ * centre MV (i maps the 3x3 offset grid to indices 0..8); for RD refinement, also
+ * interpolate the corresponding chroma blocks. */
+#define BIME_CACHE( dx, dy, list ) \
+{ \
+ x264_me_t *m = m##list;\
+ int i = 4 + 3*dx + dy; \
+ int mvx = om##list##x+dx;\
+ int mvy = om##list##y+dy;\
+ stride##list[i] = bw;\
+ src##list[i] = h->mc.get_ref( pixy_buf[list][i], &stride##list[i], m->p_fref, m->i_stride[0], mvx, mvy, bw, bh, weight_none ); \
+ if( rd )\
+ {\
+ h->mc.mc_chroma( pixu_buf[list][i], 8, m->p_fref[4], m->i_stride[1], mvx, mvy + mv##list##y_offset, bw>>1, bh>>1 );\
+ h->mc.mc_chroma( pixv_buf[list][i], 8, m->p_fref[5], m->i_stride[1], mvx, mvy + mv##list##y_offset, bw>>1, bh>>1 );\
+ }\
+}
+
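+/* A candidate is only worth a full RD evaluation if its SATD cost is within 17/16 of the
+ * best SATD so far; 17/16 is written so that "x * SATD_THRESH" expands to (x*17)/16 in
+ * integer arithmetic. */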
+#define SATD_THRESH 17/16
+
+/* Don't unroll the BIME_CACHE loop. I couldn't find any way to force this
+ * other than making its iteration count not a compile-time constant. */
+int x264_iter_kludge = 0;
+
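+/* Jointly refine both MVs of a bidirectional prediction: repeatedly try small qpel offsets
+ * to one or both MVs and keep the pair that minimizes the cost of the averaged prediction
+ * (SATD, or full rate-distortion cost when rd is set). */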
+static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2, int rd )
+{
+ static const int pixel_mv_offs[] = { 0, 4, 4*8, 0 };
+ int16_t *cache0_mv = h->mb.cache.mv[0][x264_scan8[i8*4]];
+ int16_t *cache0_mv2 = cache0_mv + pixel_mv_offs[m0->i_pixel];
+ int16_t *cache1_mv = h->mb.cache.mv[1][x264_scan8[i8*4]];
+ int16_t *cache1_mv2 = cache1_mv + pixel_mv_offs[m0->i_pixel];
+ const int i_pixel = m0->i_pixel;
+ const int bw = x264_pixel_size[i_pixel].w;
+ const int bh = x264_pixel_size[i_pixel].h;
+ const uint16_t *p_cost_m0x = m0->p_cost_mv - m0->mvp[0];
+ const uint16_t *p_cost_m0y = m0->p_cost_mv - m0->mvp[1];
+ const uint16_t *p_cost_m1x = m1->p_cost_mv - m1->mvp[0];
+ const uint16_t *p_cost_m1y = m1->p_cost_mv - m1->mvp[1];
+ ALIGNED_ARRAY_16( uint8_t, pixy_buf,[2],[9][16*16] );
+ ALIGNED_ARRAY_8( uint8_t, pixu_buf,[2],[9][8*8] );
+ ALIGNED_ARRAY_8( uint8_t, pixv_buf,[2],[9][8*8] );
+ uint8_t *src0[9];
+ uint8_t *src1[9];
+ uint8_t *pix = &h->mb.pic.p_fdec[0][(i8>>1)*8*FDEC_STRIDE+(i8&1)*8];
+ uint8_t *pixu = &h->mb.pic.p_fdec[1][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+ uint8_t *pixv = &h->mb.pic.p_fdec[2][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+ const int ref0 = h->mb.cache.ref[0][x264_scan8[i8*4]];
+ const int ref1 = h->mb.cache.ref[1][x264_scan8[i8*4]];
+ const int mv0y_offset = h->mb.b_interlaced & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mv1y_offset = h->mb.b_interlaced & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ int stride0[9];
+ int stride1[9];
+ int bm0x = m0->mv[0], om0x = bm0x;
+ int bm0y = m0->mv[1], om0y = bm0y;
+ int bm1x = m1->mv[0], om1x = bm1x;
+ int bm1y = m1->mv[1], om1y = bm1y;
+ int bcost = COST_MAX;
+ int pass = 0;
+ int j;
+ int mc_list0 = 1, mc_list1 = 1;
+ uint64_t bcostrd = COST_MAX64;
+ /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
+ ALIGNED_ARRAY_16( uint8_t, visited,[8],[8][8] );
+ /* all permutations of an offset in up to 2 of the dimensions */
+ static const int8_t dia4d[33][4] = {
+ {0,0,0,0},
+ {0,0,0,1}, {0,0,0,-1}, {0,0,1,0}, {0,0,-1,0},
+ {0,1,0,0}, {0,-1,0,0}, {1,0,0,0}, {-1,0,0,0},
+ {0,0,1,1}, {0,0,-1,-1},{0,1,1,0}, {0,-1,-1,0},
+ {1,1,0,0}, {-1,-1,0,0},{1,0,0,1}, {-1,0,0,-1},
+ {0,1,0,1}, {0,-1,0,-1},{1,0,1,0}, {-1,0,-1,0},
+ {0,0,-1,1},{0,0,1,-1}, {0,-1,1,0},{0,1,-1,0},
+ {-1,1,0,0},{1,-1,0,0}, {1,0,0,-1},{-1,0,0,1},
+ {0,-1,0,1},{0,1,0,-1}, {-1,0,1,0},{1,0,-1,0},
+ };
+
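+ /* Each pass moves either MV by at most one qpel, so over 8 passes no candidate strays more
+ * than 8 qpel from the starting MVs; bail out if that margin would cross the MV limits. */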
+ if( bm0y < h->mb.mv_min_spel[1] + 8 || bm1y < h->mb.mv_min_spel[1] + 8 ||
+ bm0y > h->mb.mv_max_spel[1] - 8 || bm1y > h->mb.mv_max_spel[1] - 8 ||
+ bm0x < h->mb.mv_min_spel[0] + 8 || bm1x < h->mb.mv_min_spel[0] + 8 ||
+ bm0x > h->mb.mv_max_spel[0] - 8 || bm1x > h->mb.mv_max_spel[0] - 8 )
+ return;
+
+ h->mc.memzero_aligned( visited, sizeof(uint8_t[8][8][8]) );
+
+ for( pass = 0; pass < 8; pass++ )
+ {
+ /* check all mv pairs that differ in at most 2 components from the current mvs. */
+ /* doesn't do chroma ME. this probably doesn't matter, as the gains
+ * from bidir ME are the same with and without chroma ME. */
+
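+ /* (Re)interpolate the 3x3 qpel neighbourhood around each list's current MV, but only for
+ * lists whose best MV actually moved in the previous pass. */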
+ if( mc_list0 )
+ for( j = x264_iter_kludge; j < 9; j++ )
+ BIME_CACHE( square1[j][0], square1[j][1], 0 );
+
+ if( mc_list1 )
+ for( j = x264_iter_kludge; j < 9; j++ )
+ BIME_CACHE( square1[j][0], square1[j][1], 1 );
+
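+ /* After the first pass, skip the all-zero offset: the centre pair was already costed. */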
+ for( j = !!pass; j < 33; j++ )
+ {
+ int m0x = dia4d[j][0] + om0x;
+ int m0y = dia4d[j][1] + om0y;
+ int m1x = dia4d[j][2] + om1x;
+ int m1y = dia4d[j][3] + om1y;
+ if( !pass || !((visited[(m0x)&7][(m0y)&7][(m1x)&7] & (1<<((m1y)&7)))) )
+ {
+ int i0 = 4 + 3*(m0x-om0x) + (m0y-om0y);
+ int i1 = 4 + 3*(m1x-om1x) + (m1y-om1y);
+ visited[(m0x)&7][(m0y)&7][(m1x)&7] |= (1<<((m1y)&7));
+ h->mc.avg[i_pixel]( pix, FDEC_STRIDE, src0[i0], stride0[i0], src1[i1], stride1[i1], i_weight );
+ int cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE )
+ + p_cost_m0x[m0x] + p_cost_m0y[m0y] + p_cost_m1x[m1x] + p_cost_m1y[m1y];
+ if( rd )
+ {
+ if( cost < bcost * SATD_THRESH )
+ {
+ bcost = X264_MIN( cost, bcost );
+ M32( cache0_mv ) = pack16to32_mask(m0x,m0y);
+ M32( cache0_mv2 ) = pack16to32_mask(m0x,m0y);
+ M32( cache1_mv ) = pack16to32_mask(m1x,m1y);
+ M32( cache1_mv2 ) = pack16to32_mask(m1x,m1y);
+ h->mc.avg[i_pixel+3]( pixu, FDEC_STRIDE, pixu_buf[0][i0], 8, pixu_buf[1][i1], 8, i_weight );
+ h->mc.avg[i_pixel+3]( pixv, FDEC_STRIDE, pixv_buf[0][i0], 8, pixv_buf[1][i1], 8, i_weight );
+ uint64_t costrd = x264_rd_cost_part( h, i_lambda2, i8*4, m0->i_pixel );
+ COPY5_IF_LT( bcostrd, costrd, bm0x, m0x, bm0y, m0y, bm1x, m1x, bm1y, m1y );
+ }
+ }
+ else
+ COPY5_IF_LT( bcost, cost, bm0x, m0x, bm0y, m0y, bm1x, m1x, bm1y, m1y );
+ }
+ }
+
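+ /* Nonzero iff that list's best MV changed this pass, i.e. its cached interpolations are stale. */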
+ mc_list0 = (om0x-bm0x)|(om0y-bm0y);
+ mc_list1 = (om1x-bm1x)|(om1y-bm1y);
+ if( !mc_list0 && !mc_list1 )
+ break;
+
+ om0x = bm0x;
+ om0y = bm0y;
+ om1x = bm1x;
+ om1y = bm1y;
+ }
+
+ m0->mv[0] = bm0x;
+ m0->mv[1] = bm0y;
+ m1->mv[0] = bm1x;
+ m1->mv[1] = bm1y;
+}
+
+void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
+{
+ x264_me_refine_bidir( h, m0, m1, i_weight, 0, 0, 0 );
+}
+
+void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 )
+{
+ /* Motion compensation is done as part of bidir_rd; don't repeat
+ * it in encoding. */
+ h->mb.b_skip_mc = 1;
+ x264_me_refine_bidir( h, m0, m1, i_weight, i8, i_lambda2, 1 );
+ h->mb.b_skip_mc = 0;
+}
+
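+/* SATD cost of one MV candidate: interpolate luma at (mx,my), compare against the source
+ * block and add the MV bits; also tracks bsatd, the best SATD seen so far. When avoid_mvp
+ * is set, the predicted MV is skipped since it has already been costed. */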
+#undef COST_MV_SATD
+#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
+{ \
+ if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
+ { \
+ h->mc.mc_luma( pix, FDEC_STRIDE, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
+ dst = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE ) \
+ + p_cost_mvx[mx] + p_cost_mvy[my]; \
+ COPY1_IF_LT( bsatd, dst ); \
+ } \
+ else \
+ dst = COST_MAX; \
+}
+
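+/* Full RD cost of a candidate that passed the SATD threshold: write the MV into the cache,
+ * redo chroma MC for 8x8-and-larger partitions, then rate-distortion-cost the partition. */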
+#define COST_MV_RD( mx, my, satd, do_dir, mdir ) \
+{ \
+ if( satd <= bsatd * SATD_THRESH ) \
+ { \
+ uint64_t cost; \
+ M32( cache_mv ) = pack16to32_mask(mx,my); \
+ M32( cache_mv2 ) = pack16to32_mask(mx,my); \
+ if( m->i_pixel <= PIXEL_8x8 )\
+ {\
+ h->mc.mc_chroma( pixu, FDEC_STRIDE, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 );\
+ h->mc.mc_chroma( pixv, FDEC_STRIDE, m->p_fref[5], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 );\
+ }\
+ cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
+ COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
+ } \
+}
+
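+/* RD-based qpel refinement of a single MV: cost the SATD-best MV and the predicted MV, then
+ * run a small hexagon search followed by a square refine, filtering each candidate by SATD
+ * before paying for a full RD evaluation. */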
+void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
+{
+ // don't have to fill the whole mv cache rectangle
+ static const int pixel_mv_offs[] = { 0, 4, 4*8, 0, 2, 2*8, 0 };
+ int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
+ int16_t *cache_mv2 = cache_mv + pixel_mv_offs[m->i_pixel];
+ const uint16_t *p_cost_mvx, *p_cost_mvy;
+ const int bw = x264_pixel_size[m->i_pixel].w;
+ const int bh = x264_pixel_size[m->i_pixel].h;
+ const int i_pixel = m->i_pixel;
+ const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+
+ uint64_t bcost = COST_MAX64;
+ int bmx = m->mv[0];
+ int bmy = m->mv[1];
+ int omx, omy, pmx, pmy, i, j;
+ unsigned bsatd;
+ int satd;
+ int dir = -2;
+ int i8 = i4>>2;
+ uint16_t amvd;
+
+ uint8_t *pix = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
+ uint8_t *pixu = &h->mb.pic.p_fdec[1][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+ uint8_t *pixv = &h->mb.pic.p_fdec[2][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+
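+ /* The motion compensation done while costing candidates is reused by the RD encode path. */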
+ h->mb.b_skip_mc = 1;
+
+ if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
+ x264_mb_predict_mv( h, i_list, i4, bw>>2, m->mvp );
+ pmx = m->mvp[0];
+ pmy = m->mvp[1];
+ p_cost_mvx = m->p_cost_mv - pmx;
+ p_cost_mvy = m->p_cost_mv - pmy;
+ COST_MV_SATD( bmx, bmy, bsatd, 0 );
+ if( m->i_pixel != PIXEL_16x16 )
+ COST_MV_RD( bmx, bmy, 0, 0, 0 )
+ else
+ bcost = m->cost;
+
+ /* check the predicted mv */
+ if( (bmx != pmx || bmy != pmy)
+ && pmx >= h->mb.mv_min_spel[0] && pmx <= h->mb.mv_max_spel[0]
+ && pmy >= h->mb.mv_min_spel[1] && pmy <= h->mb.mv_max_spel[1] )
+ {
+ COST_MV_SATD( pmx, pmy, satd, 0 );
+ COST_MV_RD ( pmx, pmy, satd, 0, 0 );
+ /* The hex motion search is guaranteed to not repeat the center candidate,
+ * so if pmv is chosen, set the "MV to avoid checking" to bmv instead. */
+ if( bmx == pmx && bmy == pmy )
+ {
+ pmx = m->mv[0];
+ pmy = m->mv[1];
+ }
+ }
+
+ if( bmy < h->mb.mv_min_spel[1] + 3 || bmy > h->mb.mv_max_spel[1] - 3 ||
+ bmx < h->mb.mv_min_spel[0] + 3 || bmx > h->mb.mv_max_spel[0] - 3 )
+ {
+ h->mb.b_skip_mc = 0;
+ return;
+ }
+
+ /* subpel hex search, same pattern as ME HEX. */
+ dir = -2;
+ omx = bmx;
+ omy = bmy;
+ for( j=0; j<6; j++ )
+ {
+ COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1 );
+ COST_MV_RD ( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1, j );
+ }
+
+ if( dir != -2 )
+ {
+ /* half hexagon, not overlapping the previous iteration */
+ for( i = 1; i < 10; i++ )
+ {
+ const int odir = mod6m1[dir+1];
+ if( bmy < h->mb.mv_min_spel[1] + 3 ||
+ bmy > h->mb.mv_max_spel[1] - 3 )
+ break;
+ dir = -2;
+ omx = bmx;
+ omy = bmy;
+ for( j=0; j<3; j++ )
+ {
+ COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1 );
+ COST_MV_RD ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1, odir-1+j );
+ }
+ if( dir == -2 )
+ break;
+ }
+ }
+
+ /* square refine, same pattern as ME HEX. */
+ omx = bmx;
+ omy = bmy;
+ for( i=0; i<8; i++ )
+ {
+ COST_MV_SATD( omx + square1[i+1][0], omy + square1[i+1][1], satd, 1 );
+ COST_MV_RD ( omx + square1[i+1][0], omy + square1[i+1][1], satd, 0, 0 );
+ }
+
+ m->cost = bcost;
+ m->mv[0] = bmx;
+ m->mv[1] = bmy;
+ x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, pack16to32_mask(bmx, bmy) );
+ amvd = pack8to16( X264_MIN(abs(bmx - m->mvp[0]),33), X264_MIN(abs(bmy - m->mvp[1]),33) );
+ x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, amvd );
+ h->mb.b_skip_mc = 0;
+}