/*****************************************************************************
* me.c: h264 encoder library (Motion Estimation)
*****************************************************************************
- * Copyright (C) 2003 Laurent Aimar
- * $Id: me.c,v 1.1 2004/06/03 19:27:08 fenrir Exp $
+ * Copyright (C) 2003-2008 x264 project
*
- * Authors: Laurent Aimar <fenrir@via.ecp.fr>
- * Loren Merritt <lorenm@u.washington.edu>
+ * Authors: Loren Merritt <lorenm@u.washington.edu>
+ * Laurent Aimar <fenrir@via.ecp.fr>
+ * Fiona Glaser <fiona@x264.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
*****************************************************************************/
#include "common/common.h"
+#include "macroblock.h"
#include "me.h"
/* presets selected from good points on the speed-vs-quality curve of several test videos
* subpel_iters[i_subpel_refine] = { refine_hpel, refine_qpel, me_hpel, me_qpel }
* where me_* are the number of EPZS iterations run on all candidate block types,
* and refine_* are run only on the winner.
- * the subme=7 values are much higher because any amount of satd search makes
- * up its time by reducing the number of rd iterations. */
-static const int subpel_iterations[][4] =
- {{1,0,0,0},
+ * the subme=8,9 values are much higher because any amount of satd search makes
+ * up its time by reducing the number of qpel-rd iterations. */
+static const int subpel_iterations[][4] =
+ {{0,0,0,0},
{1,1,0,0},
{0,1,1,0},
{0,2,1,0},
{0,2,1,1},
{0,2,1,2},
{0,0,2,2},
+ {0,0,2,2},
+ {0,0,4,10},
+ {0,0,4,10},
{0,0,4,10}};
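+/* e.g. subme=5 reads {0,2,1,2}: each candidate block type gets 1 hpel and
+ * 2 qpel iterations during the search, and the winner alone gets 2 more
+ * qpel refinement iterations. */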
/* (x-1)%6 */
static const int mod6m1[8] = {5,0,1,2,3,4,5,0};
/* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
static const int hex2[8][2] = {{-1,-2}, {-2,0}, {-1,2}, {1,2}, {2,0}, {1,-2}, {-1,-2}, {-2,0}};
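+/* e.g. a half-hex step reads hex2[dir+0..dir+2] with dir in 0..5; the two
+ * duplicated entries keep that window in bounds without a mod6. */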
-static const int square1[8][2] = {{0,-1}, {0,1}, {-1,0}, {1,0}, {-1,-1}, {1,1}, {-1,1}, {1,-1}};
+static const int square1[9][2] = {{0,0}, {0,-1}, {0,1}, {-1,0}, {1,0}, {-1,-1}, {-1,1}, {1,-1}, {1,1}};
static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel );
#define COST_MV( mx, my )\
{\
- int cost = h->pixf.sad[i_pixel]( m->p_fenc[0], FENC_STRIDE,\
- &p_fref[(my)*m->i_stride[0]+(mx)], m->i_stride[0] )\
+ int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE,\
+ &p_fref_w[(my)*stride+(mx)], stride )\
+ BITS_MVD(mx,my);\
COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
}
#define COST_MV_HPEL( mx, my ) \
{ \
- int stride = 16; \
- uint8_t *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh ); \
- int cost = h->pixf.sad[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
+ int stride2 = 16; \
+ uint8_t *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] ); \
+ int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 ) \
+ p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
COPY3_IF_LT( bpred_cost, cost, bpred_mx, mx, bpred_my, my ); \
}
#define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
{\
- uint8_t *pix_base = p_fref + bmx + bmy*m->i_stride[0];\
- h->pixf.sad_x3[i_pixel]( m->p_fenc[0],\
- pix_base + (m0x) + (m0y)*m->i_stride[0],\
- pix_base + (m1x) + (m1y)*m->i_stride[0],\
- pix_base + (m2x) + (m2y)*m->i_stride[0],\
- m->i_stride[0], costs );\
+ uint8_t *pix_base = p_fref_w + bmx + bmy*stride;\
+ h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
+ pix_base + (m0x) + (m0y)*stride,\
+ pix_base + (m1x) + (m1y)*stride,\
+ pix_base + (m2x) + (m2y)*stride,\
+ stride, costs );\
(costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
(costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
(costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
}
+#define COST_MV_X4_DIR( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y, costs )\
+{\
+ uint8_t *pix_base = p_fref_w + bmx + bmy*stride;\
+ h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
+ pix_base + (m0x) + (m0y)*stride,\
+ pix_base + (m1x) + (m1y)*stride,\
+ pix_base + (m2x) + (m2y)*stride,\
+ pix_base + (m3x) + (m3y)*stride,\
+ stride, costs );\
+ (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
+ (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
+ (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
+ (costs)[3] += BITS_MVD( bmx+(m3x), bmy+(m3y) );\
+}
+
#define COST_MV_X4( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y )\
{\
- uint8_t *pix_base = p_fref + omx + omy*m->i_stride[0];\
- h->pixf.sad_x4[i_pixel]( m->p_fenc[0],\
- pix_base + (m0x) + (m0y)*m->i_stride[0],\
- pix_base + (m1x) + (m1y)*m->i_stride[0],\
- pix_base + (m2x) + (m2y)*m->i_stride[0],\
- pix_base + (m3x) + (m3y)*m->i_stride[0],\
- m->i_stride[0], costs );\
+ uint8_t *pix_base = p_fref_w + omx + omy*stride;\
+ h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
+ pix_base + (m0x) + (m0y)*stride,\
+ pix_base + (m1x) + (m1y)*stride,\
+ pix_base + (m2x) + (m2y)*stride,\
+ pix_base + (m3x) + (m3y)*stride,\
+ stride, costs );\
costs[0] += BITS_MVD( omx+(m0x), omy+(m0y) );\
costs[1] += BITS_MVD( omx+(m1x), omy+(m1y) );\
costs[2] += BITS_MVD( omx+(m2x), omy+(m2y) );\
COPY3_IF_LT( bcost, costs[3], bmx, omx+(m3x), bmy, omy+(m3y) );\
}
-#define COST_MV_X4_ABS( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y )\
+#define COST_MV_X3_ABS( m0x, m0y, m1x, m1y, m2x, m2y )\
{\
- h->pixf.sad_x4[i_pixel]( m->p_fenc[0],\
- p_fref + (m0x) + (m0y)*m->i_stride[0],\
- p_fref + (m1x) + (m1y)*m->i_stride[0],\
- p_fref + (m2x) + (m2y)*m->i_stride[0],\
- p_fref + (m3x) + (m3y)*m->i_stride[0],\
- m->i_stride[0], costs );\
- costs[0] += p_cost_mvx[m0x<<2]; /* no cost_mvy */\
- costs[1] += p_cost_mvx[m1x<<2];\
- costs[2] += p_cost_mvx[m2x<<2];\
- costs[3] += p_cost_mvx[m3x<<2];\
+ h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
+ p_fref_w + (m0x) + (m0y)*stride,\
+ p_fref_w + (m1x) + (m1y)*stride,\
+ p_fref_w + (m2x) + (m2y)*stride,\
+ stride, costs );\
+ costs[0] += p_cost_mvx[(m0x)<<2]; /* no cost_mvy */\
+ costs[1] += p_cost_mvx[(m1x)<<2];\
+ costs[2] += p_cost_mvx[(m2x)<<2];\
COPY3_IF_LT( bcost, costs[0], bmx, m0x, bmy, m0y );\
COPY3_IF_LT( bcost, costs[1], bmx, m1x, bmy, m1y );\
COPY3_IF_LT( bcost, costs[2], bmx, m2x, bmy, m2y );\
- COPY3_IF_LT( bcost, costs[3], bmx, m3x, bmy, m3y );\
}
/* 1 */
}\
}
-void x264_me_search_ref( x264_t *h, x264_me_t *m, int (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
+void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
{
const int bw = x264_pixel_size[m->i_pixel].w;
const int bh = x264_pixel_size[m->i_pixel].h;
const int i_pixel = m->i_pixel;
+ const int stride = m->i_stride[0];
int i_me_range = h->param.analyse.i_me_range;
int bmx, bmy, bcost;
int bpred_mx = 0, bpred_my = 0, bpred_cost = COST_MAX;
int omx, omy, pmx, pmy;
- uint8_t *p_fref = m->p_fref[0];
- DECLARE_ALIGNED( uint8_t, pix[16*16], 16 );
-
+ uint8_t *p_fenc = m->p_fenc[0];
+ uint8_t *p_fref_w = m->p_fref_w;
+ ALIGNED_ARRAY_16( uint8_t, pix,[16*16] );
+
int i, j;
int dir;
- int costs[6];
+ int costs[16];
int mv_x_min = h->mb.mv_min_fpel[0];
int mv_y_min = h->mb.mv_min_fpel[1];
#define CHECK_MVRANGE(mx,my) ( mx >= mv_x_min && mx <= mv_x_max && my >= mv_y_min && my <= mv_y_max )
- const int16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
- const int16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
+ const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
+ const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
bmx = x264_clip3( m->mvp[0], mv_x_min*4, mv_x_max*4 );
bmy = x264_clip3( m->mvp[1], mv_y_min*4, mv_y_max*4 );
/* try extra predictors if provided */
if( h->mb.i_subpel_refine >= 3 )
{
+ uint32_t bmv = pack16to32_mask(bmx,bmy);
COST_MV_HPEL( bmx, bmy );
for( i = 0; i < i_mvc; i++ )
{
- const int mx = x264_clip3( mvc[i][0], mv_x_min*4, mv_x_max*4 );
- const int my = x264_clip3( mvc[i][1], mv_y_min*4, mv_y_max*4 );
- if( mx != bpred_mx || my != bpred_my )
- COST_MV_HPEL( mx, my );
+ if( M32( mvc[i] ) && (bmv - M32( mvc[i] )) )
+ {
+ int mx = x264_clip3( mvc[i][0], mv_x_min*4, mv_x_max*4 );
+ int my = x264_clip3( mvc[i][1], mv_y_min*4, mv_y_max*4 );
+ COST_MV_HPEL( mx, my );
+ }
}
bmx = ( bpred_mx + 2 ) >> 2;
bmy = ( bpred_my + 2 ) >> 2;
{
/* check the MVP */
COST_MV( pmx, pmy );
- /* I don't know why this helps */
- bcost -= BITS_MVD(bmx,bmy);
-
+ /* Because we are rounding the predicted motion vector to fullpel, there will be
+ * an extra MV cost in 15 out of 16 cases. However, when the predicted MV is
+ * chosen as the best predictor, it is often the case that the subpel search will
+ * result in a vector at or next to the predicted motion vector. Therefore, it is
+ * sensible to remove the cost of the MV from the rounded MVP to avoid unfairly
+ * biasing against use of the predicted motion vector. */
+ bcost -= BITS_MVD( pmx, pmy );
for( i = 0; i < i_mvc; i++ )
{
- const int mx = x264_clip3( ( mvc[i][0] + 2 ) >> 2, mv_x_min, mv_x_max );
- const int my = x264_clip3( ( mvc[i][1] + 2 ) >> 2, mv_y_min, mv_y_max );
- if( mx != bmx || my != bmy )
- COST_MV( mx, my );
+ int mx = (mvc[i][0] + 2) >> 2;
+ int my = (mvc[i][1] + 2) >> 2;
+ if( (mx | my) && ((mx-bmx) | (my-bmy)) )
+ {
+ mx = x264_clip3( mx, mv_x_min, mv_x_max );
+ my = x264_clip3( my, mv_y_min, mv_y_max );
+ COST_MV( mx, my );
+ }
}
}
-
COST_MV( 0, 0 );
switch( h->mb.i_me_method )
{
case X264_ME_DIA:
/* diamond search, radius 1 */
- for( i = 0; i < i_me_range; i++ )
+ i = 0;
+ bcost <<= 4;
+ do
{
- DIA1_ITER( bmx, bmy );
- if( bmx == omx && bmy == omy )
+ COST_MV_X4_DIR( 0,-1, 0,1, -1,0, 1,0, costs );
+ COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
+ COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
+ COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
+ COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
+ if( !(bcost&15) )
break;
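+ /* the candidate ids 1,3,4,12 encode (-dx,-dy) as two signed 2-bit fields in
+ * the low nibble of bcost, so the winner decodes with two sign-extending
+ * shifts: e.g. id 12 = 0b1100 -> x field = -1 -> bmx -= -1, the (1,0) point. */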
+ bmx -= (bcost<<28)>>30;
+ bmy -= (bcost<<30)>>30;
+ bcost &= ~15;
if( !CHECK_MVRANGE(bmx, bmy) )
break;
- }
+ } while( ++i < i_me_range );
+ bcost >>= 4;
break;
case X264_ME_HEX:
}
#else
/* equivalent to the above, but eliminates duplicate candidates */
- dir = -2;
/* hexagon */
COST_MV_X3_DIR( -2,0, -1, 2, 1, 2, costs );
COST_MV_X3_DIR( 2,0, 1,-2, -1,-2, costs+3 );
- COPY2_IF_LT( bcost, costs[0], dir, 0 );
- COPY2_IF_LT( bcost, costs[1], dir, 1 );
- COPY2_IF_LT( bcost, costs[2], dir, 2 );
- COPY2_IF_LT( bcost, costs[3], dir, 3 );
- COPY2_IF_LT( bcost, costs[4], dir, 4 );
- COPY2_IF_LT( bcost, costs[5], dir, 5 );
-
- if( dir != -2 )
+ bcost <<= 3;
+ COPY1_IF_LT( bcost, (costs[0]<<3)+2 );
+ COPY1_IF_LT( bcost, (costs[1]<<3)+3 );
+ COPY1_IF_LT( bcost, (costs[2]<<3)+4 );
+ COPY1_IF_LT( bcost, (costs[3]<<3)+5 );
+ COPY1_IF_LT( bcost, (costs[4]<<3)+6 );
+ COPY1_IF_LT( bcost, (costs[5]<<3)+7 );
+
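+ /* ids 2..7 keep the best hexagon direction in bcost's low 3 bits; a zero
+ * remainder means no candidate beat the center. */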
+ if( bcost&7 )
{
+ dir = (bcost&7)-2;
bmx += hex2[dir+1][0];
bmy += hex2[dir+1][1];
/* half hexagon, not overlapping the previous iteration */
for( i = 1; i < i_me_range/2 && CHECK_MVRANGE(bmx, bmy); i++ )
{
- const int odir = mod6m1[dir+1];
- COST_MV_X3_DIR( hex2[odir+0][0], hex2[odir+0][1],
- hex2[odir+1][0], hex2[odir+1][1],
- hex2[odir+2][0], hex2[odir+2][1],
+ COST_MV_X3_DIR( hex2[dir+0][0], hex2[dir+0][1],
+ hex2[dir+1][0], hex2[dir+1][1],
+ hex2[dir+2][0], hex2[dir+2][1],
costs );
- dir = -2;
- COPY2_IF_LT( bcost, costs[0], dir, odir-1 );
- COPY2_IF_LT( bcost, costs[1], dir, odir );
- COPY2_IF_LT( bcost, costs[2], dir, odir+1 );
- if( dir == -2 )
+ bcost &= ~7;
+ COPY1_IF_LT( bcost, (costs[0]<<3)+1 );
+ COPY1_IF_LT( bcost, (costs[1]<<3)+2 );
+ COPY1_IF_LT( bcost, (costs[2]<<3)+3 );
+ if( !(bcost&7) )
break;
+ dir += (bcost&7)-2;
+ dir = mod6m1[dir+1];
bmx += hex2[dir+1][0];
bmy += hex2[dir+1][1];
}
}
+ bcost >>= 3;
#endif
/* square refine */
- omx = bmx; omy = bmy;
- COST_MV_X4( 0,-1, 0,1, -1,0, 1,0 );
- COST_MV_X4( -1,-1, -1,1, 1,-1, 1,1 );
+ dir = 0;
+ COST_MV_X4_DIR( 0,-1, 0,1, -1,0, 1,0, costs );
+ COPY2_IF_LT( bcost, costs[0], dir, 1 );
+ COPY2_IF_LT( bcost, costs[1], dir, 2 );
+ COPY2_IF_LT( bcost, costs[2], dir, 3 );
+ COPY2_IF_LT( bcost, costs[3], dir, 4 );
+ COST_MV_X4_DIR( -1,-1, -1,1, 1,-1, 1,1, costs );
+ COPY2_IF_LT( bcost, costs[0], dir, 5 );
+ COPY2_IF_LT( bcost, costs[1], dir, 6 );
+ COPY2_IF_LT( bcost, costs[2], dir, 7 );
+ COPY2_IF_LT( bcost, costs[3], dir, 8 );
+ bmx += square1[dir][0];
+ bmy += square1[dir][1];
break;
case X264_ME_UMH:
/* refine predictors */
ucost1 = bcost;
DIA1_ITER( pmx, pmy );
- if( pmx || pmy )
+ if( pmx | pmy )
DIA1_ITER( 0, 0 );
if(i_pixel == PIXEL_4x4)
goto me_hex2;
ucost2 = bcost;
- if( (bmx || bmy) && (bmx!=pmx || bmy!=pmy) )
+ if( (bmx | bmy) && ((bmx-pmx) | (bmy-pmy)) )
DIA1_ITER( bmx, bmy );
if( bcost == ucost2 )
cross_start = 3;
+ abs( m->mvp[1] - mvc[0][1] );
denom++;
}
- for( i = 0; i < i_mvc-1; i++ )
- mvd += abs( mvc[i][0] - mvc[i+1][0] )
- + abs( mvc[i][1] - mvc[i+1][1] );
+ mvd += x264_predictor_difference( mvc, i_mvc );
}
sad_ctx = SAD_THRESH(1000) ? 0
* we are still centered on the same place as the DIA2. is this desirable? */
CROSS( cross_start, i_me_range, i_me_range/2 );
- /* 5x5 ESA */
- omx = bmx; omy = bmy;
- if( bcost != ucost2 )
- COST_MV_X4( 1, 0, 0, 1, -1, 0, 0,-1 );
- COST_MV_X4( 1, 1, -1, 1, -1,-1, 1,-1 );
- COST_MV_X4( 2,-1, 2, 0, 2, 1, 2, 2 );
- COST_MV_X4( 1, 2, 0, 2, -1, 2, -2, 2 );
- COST_MV_X4( -2, 1, -2, 0, -2,-1, -2,-2 );
- COST_MV_X4( -1,-2, 0,-2, 1,-2, 2,-2 );
+ COST_MV_X4( -2,-2, -2,2, 2,-2, 2,2 );
/* hexagon grid */
omx = bmx; omy = bmy;
- for( i = 1; i <= i_me_range/4; i++ )
+ const uint16_t *p_cost_omvx = p_cost_mvx + omx*4;
+ const uint16_t *p_cost_omvy = p_cost_mvy + omy*4;
+ i = 1;
+ do
{
static const int hex4[16][2] = {
- {-4, 2}, {-4, 1}, {-4, 0}, {-4,-1}, {-4,-2},
- { 4,-2}, { 4,-1}, { 4, 0}, { 4, 1}, { 4, 2},
- { 2, 3}, { 0, 4}, {-2, 3},
- {-2,-3}, { 0,-4}, { 2,-3},
+ { 0,-4}, { 0, 4}, {-2,-3}, { 2,-3},
+ {-4,-2}, { 4,-2}, {-4,-1}, { 4,-1},
+ {-4, 0}, { 4, 0}, {-4, 1}, { 4, 1},
+ {-4, 2}, { 4, 2}, {-2, 3}, { 2, 3},
};
if( 4*i > X264_MIN4( mv_x_max-omx, omx-mv_x_min,
}
else
{
- COST_MV_X4( -4*i, 2*i, -4*i, 1*i, -4*i, 0*i, -4*i,-1*i );
- COST_MV_X4( -4*i,-2*i, 4*i,-2*i, 4*i,-1*i, 4*i, 0*i );
- COST_MV_X4( 4*i, 1*i, 4*i, 2*i, 2*i, 3*i, 0*i, 4*i );
- COST_MV_X4( -2*i, 3*i, -2*i,-3*i, 0*i,-4*i, 2*i,-3*i );
+ int dir = 0;
+ uint8_t *pix_base = p_fref_w + omx + (omy-4*i)*stride;
+ int dy = i*stride;
+#define SADS(k,x0,y0,x1,y1,x2,y2,x3,y3)\
+ h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
+ pix_base x0*i+(y0-2*k+4)*dy,\
+ pix_base x1*i+(y1-2*k+4)*dy,\
+ pix_base x2*i+(y2-2*k+4)*dy,\
+ pix_base x3*i+(y3-2*k+4)*dy,\
+ stride, costs+4*k );\
+ pix_base += 2*dy;
+#define ADD_MVCOST(k,x,y) costs[k] += p_cost_omvx[x*4*i] + p_cost_omvy[y*4*i]
+#define MIN_MV(k,x,y) COPY2_IF_LT( bcost, costs[k], dir, x*16+(y&15) )
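+ /* dir packs a winning offset as x*16 + (y&15): x comes back via arithmetic
+ * shift (dir>>4) and y by sign-extending the low 4 bits, so a single int
+ * survives all 16 comparisons below. */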
+ SADS( 0, +0,-4, +0,+4, -2,-3, +2,-3 );
+ SADS( 1, -4,-2, +4,-2, -4,-1, +4,-1 );
+ SADS( 2, -4,+0, +4,+0, -4,+1, +4,+1 );
+ SADS( 3, -4,+2, +4,+2, -2,+3, +2,+3 );
+ ADD_MVCOST( 0, 0,-4 );
+ ADD_MVCOST( 1, 0, 4 );
+ ADD_MVCOST( 2,-2,-3 );
+ ADD_MVCOST( 3, 2,-3 );
+ ADD_MVCOST( 4,-4,-2 );
+ ADD_MVCOST( 5, 4,-2 );
+ ADD_MVCOST( 6,-4,-1 );
+ ADD_MVCOST( 7, 4,-1 );
+ ADD_MVCOST( 8,-4, 0 );
+ ADD_MVCOST( 9, 4, 0 );
+ ADD_MVCOST( 10,-4, 1 );
+ ADD_MVCOST( 11, 4, 1 );
+ ADD_MVCOST( 12,-4, 2 );
+ ADD_MVCOST( 13, 4, 2 );
+ ADD_MVCOST( 14,-2, 3 );
+ ADD_MVCOST( 15, 2, 3 );
+ MIN_MV( 0, 0,-4 );
+ MIN_MV( 1, 0, 4 );
+ MIN_MV( 2,-2,-3 );
+ MIN_MV( 3, 2,-3 );
+ MIN_MV( 4,-4,-2 );
+ MIN_MV( 5, 4,-2 );
+ MIN_MV( 6,-4,-1 );
+ MIN_MV( 7, 4,-1 );
+ MIN_MV( 8,-4, 0 );
+ MIN_MV( 9, 4, 0 );
+ MIN_MV( 10,-4, 1 );
+ MIN_MV( 11, 4, 1 );
+ MIN_MV( 12,-4, 2 );
+ MIN_MV( 13, 4, 2 );
+ MIN_MV( 14,-2, 3 );
+ MIN_MV( 15, 2, 3 );
+#undef SADS
+#undef ADD_MVCOST
+#undef MIN_MV
+ if(dir)
+ {
+ bmx = omx + i*(dir>>4);
+ bmy = omy + i*((dir<<28)>>28);
+ }
}
- }
- if( bmy <= mv_y_max )
+ } while( ++i <= i_me_range/4 );
+ if( bmy <= mv_y_max && bmy >= mv_y_min && bmx <= mv_x_max && bmx >= mv_x_min )
goto me_hex2;
break;
}
case X264_ME_ESA:
+ case X264_ME_TESA:
{
- const int min_x = X264_MAX( bmx - i_me_range, mv_x_min);
- const int min_y = X264_MAX( bmy - i_me_range, mv_y_min);
- const int max_x = X264_MIN( bmx + i_me_range, mv_x_max);
- const int max_y = X264_MIN( bmy + i_me_range, mv_y_max);
- int mx, my;
+ const int min_x = X264_MAX( bmx - i_me_range, mv_x_min );
+ const int min_y = X264_MAX( bmy - i_me_range, mv_y_min );
+ const int max_x = X264_MIN( bmx + i_me_range, mv_x_max );
+ const int max_y = X264_MIN( bmy + i_me_range, mv_y_max );
+ /* SEA is fastest in multiples of 4 */
+ const int width = (max_x - min_x + 3) & ~3;
+ int my;
#if 0
/* plain old exhaustive search */
+ int mx;
for( my = min_y; my <= max_y; my++ )
for( mx = min_x; mx <= max_x; mx++ )
COST_MV( mx, my );
#else
/* successive elimination by comparing DC before a full SAD,
* because sum(abs(diff)) >= abs(diff(sum)). */
- const int stride = m->i_stride[0];
- static uint8_t zero[16*16] = {0,};
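+/* a row of candidates can thus be pruned on |sum(enc)-sum(ref)| alone: if that
+ * lower bound already exceeds the best cost, the full SAD can't win, which is
+ * what the h->pixf.ads calls below batch-test. */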
uint16_t *sums_base = m->integral;
- int enc_dc[4];
+ /* due to a GCC bug on some platforms (win32?), zero[] may not actually be aligned.
+ * this is not a problem because it is not used for any SSE instructions. */
+ ALIGNED_16( static uint8_t zero[8*FENC_STRIDE] );
+ ALIGNED_ARRAY_16( int, enc_dc,[4] );
int sad_size = i_pixel <= PIXEL_8x8 ? PIXEL_8x8 : PIXEL_4x4;
int delta = x264_pixel_size[sad_size].w;
- uint16_t *ads = x264_malloc((max_x-min_x+8) * sizeof(uint16_t));
+ int16_t *xs = h->scratch_buffer;
+ int xn;
+ uint16_t *cost_fpel_mvx = h->cost_mv_fpel[x264_lambda_tab[h->mb.i_qp]][-m->mvp[0]&3] + (-m->mvp[0]>>2);
- h->pixf.sad_x4[sad_size]( zero, m->p_fenc[0], m->p_fenc[0]+delta,
- m->p_fenc[0]+delta*FENC_STRIDE, m->p_fenc[0]+delta+delta*FENC_STRIDE,
+ h->pixf.sad_x4[sad_size]( zero, p_fenc, p_fenc+delta,
+ p_fenc+delta*FENC_STRIDE, p_fenc+delta+delta*FENC_STRIDE,
FENC_STRIDE, enc_dc );
if( delta == 4 )
sums_base += stride * (h->fenc->i_lines[0] + PADV*2);
if( i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
enc_dc[1] = enc_dc[2];
- for( my = min_y; my <= max_y; my++ )
+ if( h->mb.i_me_method == X264_ME_TESA )
{
- int mvs[3], i_mvs=0;
- bcost -= p_cost_mvy[my<<2];
- h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
- ads, max_x-min_x+1 );
- for( mx = min_x; mx <= max_x; mx++ )
+ // ADS threshold, then SAD threshold, then keep the best few SADs, then SATD
+ mvsad_t *mvsads = (mvsad_t *)(xs + ((width+15)&~15));
+ int nmvsad = 0, limit;
+ int sad_thresh = i_me_range <= 16 ? 10 : i_me_range <= 24 ? 11 : 12;
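+ // sad_thresh is in eighths: candidates within 1.25x-1.5x of the best SAD
+ // survive to the SATD stage, with the wider net for larger search ranges.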
+ int bsad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+bmy*stride+bmx, stride )
+ + BITS_MVD( bmx, bmy );
+ for( my = min_y; my <= max_y; my++ )
{
- if( ads[mx-min_x] < bcost - p_cost_mvx[mx<<2] )
+ int ycost = p_cost_mvy[my<<2];
+ if( bsad <= ycost )
+ continue;
+ bsad -= ycost;
+ xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
+ cost_fpel_mvx+min_x, xs, width, bsad*17/16 );
+ for( i=0; i<xn-2; i+=3 )
+ {
+ uint8_t *ref = p_fref_w+min_x+my*stride;
+ int sads[3];
+ h->pixf.sad_x3[i_pixel]( p_fenc, ref+xs[i], ref+xs[i+1], ref+xs[i+2], stride, sads );
+ for( j=0; j<3; j++ )
+ {
+ int sad = sads[j] + cost_fpel_mvx[xs[i+j]];
+ if( sad < bsad*sad_thresh>>3 )
+ {
+ COPY1_IF_LT( bsad, sad );
+ mvsads[nmvsad].sad = sad + ycost;
+ mvsads[nmvsad].mx = min_x+xs[i+j];
+ mvsads[nmvsad].my = my;
+ nmvsad++;
+ }
+ }
+ }
+ for( ; i<xn; i++ )
{
- if( i_mvs == 3 )
+ int mx = min_x+xs[i];
+ int sad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+mx+my*stride, stride )
+ + cost_fpel_mvx[xs[i]];
+ if( sad < bsad*sad_thresh>>3 )
{
- COST_MV_X4_ABS( mvs[0],my, mvs[1],my, mvs[2],my, mx,my );
- i_mvs = 0;
+ COPY1_IF_LT( bsad, sad );
+ mvsads[nmvsad].sad = sad + ycost;
+ mvsads[nmvsad].mx = mx;
+ mvsads[nmvsad].my = my;
+ nmvsad++;
}
+ }
+ bsad += ycost;
+ }
+
+ limit = i_me_range / 2;
+ sad_thresh = bsad*sad_thresh>>3;
+ while( nmvsad > limit*2 && sad_thresh > bsad )
+ {
+ // halve the range if the domain is too large... eh, close enough
+ sad_thresh = (sad_thresh + bsad) >> 1;
+ for( i=0; i<nmvsad && mvsads[i].sad <= sad_thresh; i++ );
+ for( j=i; j<nmvsad; j++ )
+ {
+ /* mvsad_t is not guaranteed to be 8 bytes on all archs, so check before using explicit write-combining */
+ if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
+ CP64( &mvsads[i], &mvsads[j] );
else
- mvs[i_mvs++] = mx;
+ mvsads[i] = mvsads[j];
+ i += mvsads[j].sad <= sad_thresh;
}
+ nmvsad = i;
+ }
+ while( nmvsad > limit )
+ {
+ int bsad = mvsads[0].sad;
+ int bi = 0;
+ for( i=1; i<nmvsad; i++ )
+ COPY2_IF_GT( bsad, mvsads[i].sad, bi, i );
+ nmvsad--;
+ if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
+ CP64( &mvsads[bi], &mvsads[nmvsad] );
+ else
+ mvsads[bi] = mvsads[nmvsad];
+ }
+ for( i=0; i<nmvsad; i++ )
+ COST_MV( mvsads[i].mx, mvsads[i].my );
+ }
+ else
+ {
+ // just ADS and SAD
+ for( my = min_y; my <= max_y; my++ )
+ {
+ int ycost = p_cost_mvy[my<<2];
+ if( bcost <= ycost )
+ continue;
+ bcost -= ycost;
+ xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
+ cost_fpel_mvx+min_x, xs, width, bcost );
+ for( i=0; i<xn-2; i+=3 )
+ COST_MV_X3_ABS( min_x+xs[i],my, min_x+xs[i+1],my, min_x+xs[i+2],my );
+ bcost += ycost;
+ for( ; i<xn; i++ )
+ COST_MV( min_x+xs[i], my );
}
- bcost += p_cost_mvy[my<<2];
- for( i=0; i<i_mvs; i++ )
- COST_MV( mvs[i], my );
}
-
- x264_free(ads);
#endif
}
break;
int qpel = subpel_iterations[h->mb.i_subpel_refine][3];
refine_subpel( h, m, hpel, qpel, p_halfpel_thresh, 0 );
}
- else if( m->mv[1] > h->mb.mv_max_spel[1] )
- m->mv[1] = h->mb.mv_max_spel[1];
}
#undef COST_MV
if( m->i_pixel <= PIXEL_8x8 && h->sh.i_type == SLICE_TYPE_P )
m->cost -= m->i_ref_cost;
-
+
refine_subpel( h, m, hpel, qpel, NULL, 1 );
}
#define COST_MV_SAD( mx, my ) \
{ \
int stride = 16; \
- uint8_t *src = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh ); \
- int cost = h->pixf.sad[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
+ uint8_t *src = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
+ int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
+ p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my ); \
}
if( b_refine_qpel || (dir^1) != odir ) \
{ \
int stride = 16; \
- uint8_t *src = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh ); \
- int cost = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
+ uint8_t *src = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
+ int cost = h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
+ p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
if( b_chroma_me && cost < bcost ) \
{ \
- h->mc.mc_chroma( pix[0], 8, m->p_fref[4], m->i_stride[1], mx, my, bw/2, bh/2 ); \
+ h->mc.mc_chroma( pix[0], 8, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw/2, bh/2 ); \
+ if( m->weight[1].weightfn ) \
+ m->weight[1].weightfn[x264_pixel_size[i_pixel].w>>3]( pix[0], 8, pix[0], 8, \
+ &m->weight[1], x264_pixel_size[i_pixel].h>>1 ); \
cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[1], FENC_STRIDE, pix[0], 8 ); \
if( cost < bcost ) \
{ \
- h->mc.mc_chroma( pix[0], 8, m->p_fref[5], m->i_stride[1], mx, my, bw/2, bh/2 ); \
+ h->mc.mc_chroma( pix[0], 8, m->p_fref[5], m->i_stride[1], mx, my + mvy_offset, bw/2, bh/2 ); \
+ if( m->weight[2].weightfn ) \
+ m->weight[2].weightfn[x264_pixel_size[i_pixel].w>>3]( pix[0], 8, pix[0], 8, \
+ &m->weight[2], x264_pixel_size[i_pixel].h>>1 ); \
cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[2], FENC_STRIDE, pix[0], 8 ); \
} \
} \
if( cost < bcost ) \
{
const int bw = x264_pixel_size[m->i_pixel].w;
const int bh = x264_pixel_size[m->i_pixel].h;
- const int16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
- const int16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
+ const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
+ const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
const int i_pixel = m->i_pixel;
const int b_chroma_me = h->mb.b_chroma_me && i_pixel <= PIXEL_8x8;
+ const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
- DECLARE_ALIGNED( uint8_t, pix[2][32*18], 16 ); // really 17x17, but round up for alignment
+ ALIGNED_ARRAY_16( uint8_t, pix,[2],[32*18] ); // really 17x17, but round up for alignment
int omx, omy;
int i;
int bcost = m->cost;
int odir = -1, bdir;
-
/* try the subpel component of the predicted mv */
if( hpel_iters && h->mb.i_subpel_refine < 3 )
{
- int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
- int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1], h->mb.mv_max_spel[1] );
- if( mx != bmx || my != bmy )
+ int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0]+2, h->mb.mv_max_spel[0]-2 );
+ int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1]+2, h->mb.mv_max_spel[1]-2 );
+ if( (mx-bmx)|(my-bmy) )
COST_MV_SAD( mx, my );
}
int costs[4];
int stride = 32; // candidates are either all hpel or all qpel, so one stride is enough
uint8_t *src0, *src1, *src2, *src3;
- src0 = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1 );
- src2 = h->mc.get_ref( pix[1], &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh );
+ src0 = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1, &m->weight[0] );
+ src2 = h->mc.get_ref( pix[1], &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh, &m->weight[0] );
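+ /* omy-2 and omy+2 (and omx-2/omx+2) share a subpel phase one fullpel apart,
+ * so each interpolation covers two diamond points: src1/src3 below are just
+ * src0/src2 offset by one row/column. */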
src1 = src0 + stride;
src3 = src2 + 1;
- h->pixf.sad_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
+ h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
COPY2_IF_LT( bcost, costs[0] + p_cost_mvx[omx ] + p_cost_mvy[omy-2], bmy, omy-2 );
COPY2_IF_LT( bcost, costs[1] + p_cost_mvx[omx ] + p_cost_mvy[omy+2], bmy, omy+2 );
COPY3_IF_LT( bcost, costs[2] + p_cost_mvx[omx-2] + p_cost_mvy[omy ], bmx, omx-2, bmy, omy );
COPY3_IF_LT( bcost, costs[3] + p_cost_mvx[omx+2] + p_cost_mvy[omy ], bmx, omx+2, bmy, omy );
- if( bmx == omx && bmy == omy )
+ if( (bmx == omx) & (bmy == omy) )
break;
}
if( !b_refine_qpel )
{
- /* check for mvrange */
- if( bmy > h->mb.mv_max_spel[1] )
- bmy = h->mb.mv_max_spel[1];
bcost = COST_MAX;
COST_MV_SATD( bmx, bmy, -1 );
}
bdir = -1;
for( i = qpel_iters; i > 0; i-- )
{
+ if( bmy <= h->mb.mv_min_spel[1] || bmy >= h->mb.mv_max_spel[1] || bmx <= h->mb.mv_min_spel[0] || bmx >= h->mb.mv_max_spel[0] )
+ break;
odir = bdir;
omx = bmx;
omy = bmy;
break;
}
- /* check for mvrange */
- if( bmy > h->mb.mv_max_spel[1] )
- {
- bmy = h->mb.mv_max_spel[1];
- bcost = COST_MAX;
- COST_MV_SATD( bmx, bmy, -1 );
- }
-
m->cost = bcost;
m->mv[0] = bmx;
m->mv[1] = bmy;
m->cost_mv = p_cost_mvx[ bmx ] + p_cost_mvy[ bmy ];
}
-#define BIME_CACHE( dx, dy ) \
+#define BIME_CACHE( dx, dy, list ) \
{ \
+ x264_me_t *m = m##list;\
int i = 4 + 3*dx + dy; \
- h->mc.mc_luma( pix0[i], bw, m0->p_fref, m0->i_stride[0], om0x+dx, om0y+dy, bw, bh ); \
- h->mc.mc_luma( pix1[i], bw, m1->p_fref, m1->i_stride[0], om1x+dx, om1y+dy, bw, bh ); \
-}
-
-#define BIME_CACHE2(a,b) \
- BIME_CACHE(a,b) \
- BIME_CACHE(-(a),-(b))
-
-#define COST_BIMV_SATD( m0x, m0y, m1x, m1y ) \
-if( pass == 0 || !visited[(m0x)&7][(m0y)&7][(m1x)&7][(m1y)&7] ) \
-{ \
- int cost; \
- int i0 = 4 + 3*(m0x-om0x) + (m0y-om0y); \
- int i1 = 4 + 3*(m1x-om1x) + (m1y-om1y); \
- visited[(m0x)&7][(m0y)&7][(m1x)&7][(m1y)&7] = 1; \
- memcpy( pix, pix0[i0], bs ); \
- if( i_weight == 32 ) \
- h->mc.avg[i_pixel]( pix, bw, pix1[i1], bw ); \
- else \
- h->mc.avg_weight[i_pixel]( pix, bw, pix1[i1], bw, i_weight ); \
- cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, bw ) \
- + p_cost_m0x[ m0x ] + p_cost_m0y[ m0y ] \
- + p_cost_m1x[ m1x ] + p_cost_m1y[ m1y ]; \
- if( cost < bcost ) \
- { \
- bcost = cost; \
- bm0x = m0x; \
- bm0y = m0y; \
- bm1x = m1x; \
- bm1y = m1y; \
- } \
+ int mvx = om##list##x+dx;\
+ int mvy = om##list##y+dy;\
+ stride##list[i] = bw;\
+ src##list[i] = h->mc.get_ref( pixy_buf[list][i], &stride##list[i], m->p_fref, m->i_stride[0], mvx, mvy, bw, bh, weight_none ); \
+ if( rd )\
+ {\
+ h->mc.mc_chroma( pixu_buf[list][i], 8, m->p_fref[4], m->i_stride[1], mvx, mvy + mv##list##y_offset, bw>>1, bh>>1 );\
+ h->mc.mc_chroma( pixv_buf[list][i], 8, m->p_fref[5], m->i_stride[1], mvx, mvy + mv##list##y_offset, bw>>1, bh>>1 );\
+ }\
}
-#define CHECK_BIDIR(a,b,c,d) \
- COST_BIMV_SATD(om0x+a, om0y+b, om1x+c, om1y+d)
-
-#define CHECK_BIDIR2(a,b,c,d) \
- CHECK_BIDIR(a,b,c,d) \
- CHECK_BIDIR(-(a),-(b),-(c),-(d))
+#define SATD_THRESH 17/16
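+/* applied multiplicatively: candidates more than 1/16 (~6%) worse in satd
+ * than the current best are not worth a full rd measurement. */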
-#define CHECK_BIDIR8(a,b,c,d) \
- CHECK_BIDIR2(a,b,c,d) \
- CHECK_BIDIR2(b,c,d,a) \
- CHECK_BIDIR2(c,d,a,b) \
- CHECK_BIDIR2(d,a,b,c)
+/* Don't unroll the BIME_CACHE loop. I couldn't find any way to force this
+ * other than making its iteration count not a compile-time constant. */
+int x264_iter_kludge = 0;
-int x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
+static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2, int rd )
{
+ static const int pixel_mv_offs[] = { 0, 4, 4*8, 0 };
+ int16_t *cache0_mv = h->mb.cache.mv[0][x264_scan8[i8*4]];
+ int16_t *cache0_mv2 = cache0_mv + pixel_mv_offs[m0->i_pixel];
+ int16_t *cache1_mv = h->mb.cache.mv[1][x264_scan8[i8*4]];
+ int16_t *cache1_mv2 = cache1_mv + pixel_mv_offs[m0->i_pixel];
const int i_pixel = m0->i_pixel;
const int bw = x264_pixel_size[i_pixel].w;
const int bh = x264_pixel_size[i_pixel].h;
- const int bs = bw*bh;
- const int16_t *p_cost_m0x = m0->p_cost_mv - x264_clip3( m0->mvp[0], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
- const int16_t *p_cost_m0y = m0->p_cost_mv - x264_clip3( m0->mvp[1], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
- const int16_t *p_cost_m1x = m1->p_cost_mv - x264_clip3( m1->mvp[0], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
- const int16_t *p_cost_m1y = m1->p_cost_mv - x264_clip3( m1->mvp[1], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
- DECLARE_ALIGNED( uint8_t, pix0[9][16*16], 16 );
- DECLARE_ALIGNED( uint8_t, pix1[9][16*16], 16 );
- DECLARE_ALIGNED( uint8_t, pix[16*16], 16 );
+ const uint16_t *p_cost_m0x = m0->p_cost_mv - m0->mvp[0];
+ const uint16_t *p_cost_m0y = m0->p_cost_mv - m0->mvp[1];
+ const uint16_t *p_cost_m1x = m1->p_cost_mv - m1->mvp[0];
+ const uint16_t *p_cost_m1y = m1->p_cost_mv - m1->mvp[1];
+ ALIGNED_ARRAY_16( uint8_t, pixy_buf,[2],[9][16*16] );
+ ALIGNED_8( uint8_t pixu_buf[2][9][8*8] );
+ ALIGNED_8( uint8_t pixv_buf[2][9][8*8] );
+ uint8_t *src0[9];
+ uint8_t *src1[9];
+ uint8_t *pix = &h->mb.pic.p_fdec[0][(i8>>1)*8*FDEC_STRIDE+(i8&1)*8];
+ uint8_t *pixu = &h->mb.pic.p_fdec[1][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+ uint8_t *pixv = &h->mb.pic.p_fdec[2][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+ const int ref0 = h->mb.cache.ref[0][x264_scan8[i8*4]];
+ const int ref1 = h->mb.cache.ref[1][x264_scan8[i8*4]];
+ const int mv0y_offset = h->mb.b_interlaced & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ const int mv1y_offset = h->mb.b_interlaced & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
+ int stride0[9];
+ int stride1[9];
int bm0x = m0->mv[0], om0x = bm0x;
int bm0y = m0->mv[1], om0y = bm0y;
int bm1x = m1->mv[0], om1x = bm1x;
int bm1y = m1->mv[1], om1y = bm1y;
int bcost = COST_MAX;
int pass = 0;
- uint8_t visited[8][8][8][8];
- memset( visited, 0, sizeof(visited) );
-
- BIME_CACHE( 0, 0 );
- CHECK_BIDIR( 0, 0, 0, 0 );
-
- if( bm0y > h->mb.mv_max_spel[1] - 8 ||
- bm1y > h->mb.mv_max_spel[1] - 8 )
- return bcost;
+ int j;
+ int mc_list0 = 1, mc_list1 = 1;
+ uint64_t bcostrd = COST_MAX64;
+ /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
+ ALIGNED_ARRAY_16( uint8_t, visited,[8],[8][8] );
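+ /* the &7 indexing hashes each MV component to its low 3 bits; distinct MVs
+ * can collide, but a collision only skips a candidate, which a refinement
+ * search can tolerate. */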
+ /* all permutations of an offset in up to 2 of the dimensions */
+ static const int8_t dia4d[33][4] = {
+ {0,0,0,0},
+ {0,0,0,1}, {0,0,0,-1}, {0,0,1,0}, {0,0,-1,0},
+ {0,1,0,0}, {0,-1,0,0}, {1,0,0,0}, {-1,0,0,0},
+ {0,0,1,1}, {0,0,-1,-1},{0,1,1,0}, {0,-1,-1,0},
+ {1,1,0,0}, {-1,-1,0,0},{1,0,0,1}, {-1,0,0,-1},
+ {0,1,0,1}, {0,-1,0,-1},{1,0,1,0}, {-1,0,-1,0},
+ {0,0,-1,1},{0,0,1,-1}, {0,-1,1,0},{0,1,-1,0},
+ {-1,1,0,0},{1,-1,0,0}, {1,0,0,-1},{-1,0,0,1},
+ {0,-1,0,1},{0,1,0,-1}, {-1,0,1,0},{1,0,-1,0},
+ };
+
+ if( bm0y < h->mb.mv_min_spel[1] + 8 || bm1y < h->mb.mv_min_spel[1] + 8 ||
+ bm0y > h->mb.mv_max_spel[1] - 8 || bm1y > h->mb.mv_max_spel[1] - 8 ||
+ bm0x < h->mb.mv_min_spel[0] + 8 || bm1x < h->mb.mv_min_spel[0] + 8 ||
+ bm0x > h->mb.mv_max_spel[0] - 8 || bm1x > h->mb.mv_max_spel[0] - 8 )
+ return;
+
+ h->mc.memzero_aligned( visited, sizeof(uint8_t[8][8][8]) );
for( pass = 0; pass < 8; pass++ )
{
/* doesn't do chroma ME. this probably doesn't matter, as the gains
* from bidir ME are the same with and without chroma ME. */
- BIME_CACHE2( 1, 0 );
- BIME_CACHE2( 0, 1 );
- BIME_CACHE2( 1, 1 );
- BIME_CACHE2( 1,-1 );
+ if( mc_list0 )
+ for( j = x264_iter_kludge; j < 9; j++ )
+ BIME_CACHE( square1[j][0], square1[j][1], 0 );
+
+ if( mc_list1 )
+ for( j = x264_iter_kludge; j < 9; j++ )
+ BIME_CACHE( square1[j][0], square1[j][1], 1 );
- CHECK_BIDIR8( 0, 0, 0, 1 );
- CHECK_BIDIR8( 0, 0, 1, 1 );
- CHECK_BIDIR2( 0, 1, 0, 1 );
- CHECK_BIDIR2( 1, 0, 1, 0 );
- CHECK_BIDIR8( 0, 0,-1, 1 );
- CHECK_BIDIR2( 0,-1, 0, 1 );
- CHECK_BIDIR2(-1, 0, 1, 0 );
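+ /* dia4d[0] is the zero offset; after the first pass the center is already
+ * the incumbent best, so j starts at !!pass to skip re-checking it. */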
+ for( j = !!pass; j < 33; j++ )
+ {
+ int m0x = dia4d[j][0] + om0x;
+ int m0y = dia4d[j][1] + om0y;
+ int m1x = dia4d[j][2] + om1x;
+ int m1y = dia4d[j][3] + om1y;
+ if( !pass || !((visited[(m0x)&7][(m0y)&7][(m1x)&7] & (1<<((m1y)&7)))) )
+ {
+ int i0 = 4 + 3*(m0x-om0x) + (m0y-om0y);
+ int i1 = 4 + 3*(m1x-om1x) + (m1y-om1y);
+ visited[(m0x)&7][(m0y)&7][(m1x)&7] |= (1<<((m1y)&7));
+ h->mc.avg[i_pixel]( pix, FDEC_STRIDE, src0[i0], stride0[i0], src1[i1], stride1[i1], i_weight );
+ int cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE )
+ + p_cost_m0x[m0x] + p_cost_m0y[m0y] + p_cost_m1x[m1x] + p_cost_m1y[m1y];
+ if( rd )
+ {
+ if( cost < bcost * SATD_THRESH )
+ {
+ bcost = X264_MIN( cost, bcost );
+ M32( cache0_mv ) = pack16to32_mask(m0x,m0y);
+ M32( cache0_mv2 ) = pack16to32_mask(m0x,m0y);
+ M32( cache1_mv ) = pack16to32_mask(m1x,m1y);
+ M32( cache1_mv2 ) = pack16to32_mask(m1x,m1y);
+ h->mc.avg[i_pixel+3]( pixu, FDEC_STRIDE, pixu_buf[0][i0], 8, pixu_buf[1][i1], 8, i_weight );
+ h->mc.avg[i_pixel+3]( pixv, FDEC_STRIDE, pixv_buf[0][i0], 8, pixv_buf[1][i1], 8, i_weight );
+ uint64_t costrd = x264_rd_cost_part( h, i_lambda2, i8*4, m0->i_pixel );
+ COPY5_IF_LT( bcostrd, costrd, bm0x, m0x, bm0y, m0y, bm1x, m1x, bm1y, m1y );
+ }
+ }
+ else
+ COPY5_IF_LT( bcost, cost, bm0x, m0x, bm0y, m0y, bm1x, m1x, bm1y, m1y );
+ }
+ }
- if( om0x == bm0x && om0y == bm0y && om1x == bm1x && om1y == bm1y )
+ mc_list0 = (om0x-bm0x)|(om0y-bm0y);
+ mc_list1 = (om1x-bm1x)|(om1y-bm1y);
+ if( !mc_list0 && !mc_list1 )
break;
om0x = bm0x;
om0y = bm0y;
om1x = bm1x;
om1y = bm1y;
- BIME_CACHE( 0, 0 );
}
m0->mv[0] = bm0x;
m0->mv[1] = bm0y;
m1->mv[0] = bm1x;
m1->mv[1] = bm1y;
- return bcost;
+}
+
+void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
+{
+ x264_me_refine_bidir( h, m0, m1, i_weight, 0, 0, 0 );
+}
+
+void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 )
+{
+ /* Motion compensation is done as part of bidir_rd; don't repeat
+ * it in encoding. */
+ h->mb.b_skip_mc = 1;
+ x264_me_refine_bidir( h, m0, m1, i_weight, i8, i_lambda2, 1 );
+ h->mb.b_skip_mc = 0;
}
#undef COST_MV_SATD
-#define COST_MV_SATD( mx, my, dst ) \
+#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
{ \
- int stride = 16; \
- uint8_t *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw*4, bh*4 ); \
- dst = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
- + p_cost_mvx[mx] + p_cost_mvy[my]; \
- COPY1_IF_LT( bsatd, dst ); \
+ if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
+ { \
+ h->mc.mc_luma( pix, FDEC_STRIDE, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
+ dst = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE ) \
+ + p_cost_mvx[mx] + p_cost_mvy[my]; \
+ COPY1_IF_LT( bsatd, dst ); \
+ } \
+ else \
+ dst = COST_MAX; \
}
#define COST_MV_RD( mx, my, satd, do_dir, mdir ) \
{ \
- if( satd <= bsatd * SATD_THRESH )\
+ if( satd <= bsatd * SATD_THRESH ) \
{ \
- int cost; \
- cache_mv[0] = cache_mv2[0] = mx; \
- cache_mv[1] = cache_mv2[1] = my; \
- cost = x264_rd_cost_part( h, i_lambda2, i8, m->i_pixel ); \
+ uint64_t cost; \
+ M32( cache_mv ) = pack16to32_mask(mx,my); \
+ M32( cache_mv2 ) = pack16to32_mask(mx,my); \
+ if( m->i_pixel <= PIXEL_8x8 )\
+ {\
+ h->mc.mc_chroma( pixu, FDEC_STRIDE, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 );\
+ h->mc.mc_chroma( pixv, FDEC_STRIDE, m->p_fref[5], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 );\
+ }\
+ cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
} \
}
-#define SATD_THRESH 17/16
-
-void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i8 )
+void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
{
// don't have to fill the whole mv cache rectangle
- static const int pixel_mv_offs[] = { 0, 4, 4*8, 0 };
- int16_t *cache_mv = h->mb.cache.mv[0][x264_scan8[i8*4]];
+ static const int pixel_mv_offs[] = { 0, 4, 4*8, 0, 2, 2*8, 0 };
+ int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
int16_t *cache_mv2 = cache_mv + pixel_mv_offs[m->i_pixel];
- const int16_t *p_cost_mvx, *p_cost_mvy;
- const int bw = x264_pixel_size[m->i_pixel].w>>2;
- const int bh = x264_pixel_size[m->i_pixel].h>>2;
+ const uint16_t *p_cost_mvx, *p_cost_mvy;
+ const int bw = x264_pixel_size[m->i_pixel].w;
+ const int bh = x264_pixel_size[m->i_pixel].h;
const int i_pixel = m->i_pixel;
+ const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
- DECLARE_ALIGNED( uint8_t, pix[16*16], 16 );
- int bcost = m->i_pixel == PIXEL_16x16 ? m->cost : COST_MAX;
+ uint64_t bcost = COST_MAX64;
int bmx = m->mv[0];
int bmy = m->mv[1];
- int omx = bmx;
- int omy = bmy;
- int pmx, pmy, i, j;
+ int omx, omy, pmx, pmy, i, j;
unsigned bsatd;
- int satd = 0;
+ int satd;
int dir = -2;
- int satds[8];
+ int i8 = i4>>2;
- if( m->i_pixel != PIXEL_16x16 && i8 != 0 )
- x264_mb_predict_mv( h, 0, i8*4, bw, m->mvp );
+ uint8_t *pix = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
+ uint8_t *pixu = &h->mb.pic.p_fdec[1][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+ uint8_t *pixv = &h->mb.pic.p_fdec[2][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
+
+ h->mb.b_skip_mc = 1;
+
+ if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
+ x264_mb_predict_mv( h, i_list, i4, bw>>2, m->mvp );
pmx = m->mvp[0];
pmy = m->mvp[1];
p_cost_mvx = m->p_cost_mv - pmx;
p_cost_mvy = m->p_cost_mv - pmy;
- COST_MV_SATD( bmx, bmy, bsatd );
- COST_MV_RD( bmx, bmy, 0, 0, 0);
+ COST_MV_SATD( bmx, bmy, bsatd, 0 );
+ if( m->i_pixel != PIXEL_16x16 )
+ COST_MV_RD( bmx, bmy, 0, 0, 0 )
+ else
+ bcost = m->cost;
/* check the predicted mv */
if( (bmx != pmx || bmy != pmy)
&& pmx >= h->mb.mv_min_spel[0] && pmx <= h->mb.mv_max_spel[0]
&& pmy >= h->mb.mv_min_spel[1] && pmy <= h->mb.mv_max_spel[1] )
{
- COST_MV_SATD( pmx, pmy, satd );
- COST_MV_RD( pmx, pmy, satd, 0,0 );
+ COST_MV_SATD( pmx, pmy, satd, 0 );
+ COST_MV_RD ( pmx, pmy, satd, 0, 0 );
+ /* The hex motion search is guaranteed to not repeat the center candidate,
+ * so if pmv is chosen, set the "MV to avoid checking" to bmv instead. */
+ if( bmx == pmx && bmy == pmy )
+ {
+ pmx = m->mv[0];
+ pmy = m->mv[1];
+ }
+ }
+
+ if( bmy < h->mb.mv_min_spel[1] + 3 || bmy > h->mb.mv_max_spel[1] - 3 ||
+ bmx < h->mb.mv_min_spel[0] + 3 || bmx > h->mb.mv_max_spel[0] - 3 )
+ {
+ h->mb.b_skip_mc = 0;
+ return;
}
/* subpel hex search, same pattern as ME HEX. */
dir = -2;
omx = bmx;
omy = bmy;
- for( j=0; j<6; j++ ) COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satds[j] );
- for( j=0; j<6; j++ ) COST_MV_RD ( omx + hex2[j+1][0], omy + hex2[j+1][1], satds[j], 1,j );
+ for( j=0; j<6; j++ )
+ {
+ COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1 );
+ COST_MV_RD ( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1, j );
+ }
+
if( dir != -2 )
{
/* half hexagon, not overlapping the previous iteration */
for( i = 1; i < 10; i++ )
{
const int odir = mod6m1[dir+1];
- if( bmy > h->mb.mv_max_spel[1] - 2 ||
- bmy < h->mb.mv_min_spel[1] - 2 )
+ if( bmy < h->mb.mv_min_spel[1] + 3 ||
+ bmy > h->mb.mv_max_spel[1] - 3 )
break;
dir = -2;
omx = bmx;
omy = bmy;
- for( j=0; j<3; j++ ) COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satds[j] );
- for( j=0; j<3; j++ ) COST_MV_RD ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satds[j], 1, odir-1+j );
+ for( j=0; j<3; j++ )
+ {
+ COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1 );
+ COST_MV_RD ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1, odir-1+j );
+ }
if( dir == -2 )
break;
}
}
- /* square refine, same as pattern as ME HEX. */
+ /* square refine, same pattern as ME HEX. */
omx = bmx;
omy = bmy;
- for( i=0; i<8; i++ ) COST_MV_SATD( omx + square1[i][0], omy + square1[i][1], satds[i] );
- for( i=0; i<8; i++ ) COST_MV_RD ( omx + square1[i][0], omy + square1[i][1], satds[i], 0,0 );
+ for( i=0; i<8; i++ )
+ {
+ COST_MV_SATD( omx + square1[i+1][0], omy + square1[i+1][1], satd, 1 );
+ COST_MV_RD ( omx + square1[i+1][0], omy + square1[i+1][1], satd, 0, 0 );
+ }
- bmy = x264_clip3( bmy, h->mb.mv_min_spel[1], h->mb.mv_max_spel[1] );
m->cost = bcost;
m->mv[0] = bmx;
m->mv[1] = bmy;
- x264_macroblock_cache_mv ( h, 2*(i8&1), i8&2, bw, bh, 0, bmx, bmy );
- x264_macroblock_cache_mvd( h, 2*(i8&1), i8&2, bw, bh, 0, bmx - pmx, bmy - pmy );
+ x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, pack16to32_mask(bmx, bmy) );
+ x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, pack16to32_mask(bmx - m->mvp[0], bmy - m->mvp[1]) );
+ h->mb.b_skip_mc = 0;
}
-