/*****************************************************************************
 * me.c: h264 encoder library (Motion Estimation)
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/
#include "common/common.h"
#include "macroblock.h"
/* presets selected from good points on the speed-vs-quality curve of several test videos
 * subpel_iters[i_subpel_refine] = { refine_hpel, refine_qpel, me_hpel, me_qpel }
 * where me_* are the number of EPZS iterations run on all candidate block types,
 * and refine_* are run only on the winner.
 * the subme=8,9 values are much higher because any amount of satd search makes
 * up its time by reducing the number of qpel-rd iterations. */
static const uint8_t subpel_iterations[][4] =
static const uint8_t mod6m1[8] = {5,0,1,2,3,4,5,0};
/* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
static const int8_t hex2[8][2] = {{-1,-2}, {-2,0}, {-1,2}, {1,2}, {2,0}, {1,-2}, {-1,-2}, {-2,0}};
static const int8_t square1[9][2] = {{0,0}, {0,-1}, {0,1}, {-1,0}, {1,0}, {-1,-1}, {-1,1}, {1,-1}, {1,1}};
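
/* Illustrative note (not from the original source): hex2 repeats its first two
 * points at the end so that hex2[dir+0..dir+2] stays in bounds for any dir in
 * 0..5, and mod6m1[i] == (i+5)%6, i.e. (i-1) mod 6, for i in 0..7:
 *
 *     for( int i = 0; i < 8; i++ )
 *         assert( mod6m1[i] == (i+5)%6 );
 *
 * so the hexagon walk below never needs an actual modulo operation. */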
static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel );
#define BITS_MVD( mx, my )\
    (p_cost_mvx[(mx)<<2] + p_cost_mvy[(my)<<2])
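
/* Illustrative note (not from the original source): p_cost_mvx/p_cost_mvy are
 * set up in each function as m->p_cost_mv - m->mvp[n], so BITS_MVD(mx,my)
 * reads m->p_cost_mv[4*mx - mvp[0]] + m->p_cost_mv[4*my - mvp[1]]: the bit
 * cost of coding the qpel-scale mv difference for a fullpel candidate. */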
#define COST_MV( mx, my )\
{\
    int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE,\
                   &p_fref_w[(my)*stride+(mx)], stride )\
             + BITS_MVD(mx,my);\
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
}
#define COST_MV_HPEL( mx, my ) \
{ \
    int stride2 = 16; \
    pixel *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    COPY3_IF_LT( bpred_cost, cost, bpred_mx, mx, bpred_my, my ); \
}
#define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
{\
    pixel *pix_base = p_fref_w + bmx + bmy*stride;\
    h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
        pix_base + (m0x) + (m0y)*stride,\
        pix_base + (m1x) + (m1y)*stride,\
        pix_base + (m2x) + (m2y)*stride,\
        stride, costs );\
    (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
    (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
    (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
}
#define COST_MV_X4_DIR( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y, costs )\
{\
    pixel *pix_base = p_fref_w + bmx + bmy*stride;\
    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
        pix_base + (m0x) + (m0y)*stride,\
        pix_base + (m1x) + (m1y)*stride,\
        pix_base + (m2x) + (m2y)*stride,\
        pix_base + (m3x) + (m3y)*stride,\
        stride, costs );\
    (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
    (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
    (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
    (costs)[3] += BITS_MVD( bmx+(m3x), bmy+(m3y) );\
}
#define COST_MV_X4( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y )\
{\
    pixel *pix_base = p_fref_w + omx + omy*stride;\
    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
        pix_base + (m0x) + (m0y)*stride,\
        pix_base + (m1x) + (m1y)*stride,\
        pix_base + (m2x) + (m2y)*stride,\
        pix_base + (m3x) + (m3y)*stride,\
        stride, costs );\
    costs[0] += BITS_MVD( omx+(m0x), omy+(m0y) );\
    costs[1] += BITS_MVD( omx+(m1x), omy+(m1y) );\
    costs[2] += BITS_MVD( omx+(m2x), omy+(m2y) );\
    costs[3] += BITS_MVD( omx+(m3x), omy+(m3y) );\
    COPY3_IF_LT( bcost, costs[0], bmx, omx+(m0x), bmy, omy+(m0y) );\
    COPY3_IF_LT( bcost, costs[1], bmx, omx+(m1x), bmy, omy+(m1y) );\
    COPY3_IF_LT( bcost, costs[2], bmx, omx+(m2x), bmy, omy+(m2y) );\
    COPY3_IF_LT( bcost, costs[3], bmx, omx+(m3x), bmy, omy+(m3y) );\
}
#define COST_MV_X3_ABS( m0x, m0y, m1x, m1y, m2x, m2y )\
{\
    h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
        p_fref_w + (m0x) + (m0y)*stride,\
        p_fref_w + (m1x) + (m1y)*stride,\
        p_fref_w + (m2x) + (m2y)*stride,\
        stride, costs );\
    costs[0] += p_cost_mvx[(m0x)<<2]; /* no cost_mvy */\
    costs[1] += p_cost_mvx[(m1x)<<2];\
    costs[2] += p_cost_mvx[(m2x)<<2];\
    COPY3_IF_LT( bcost, costs[0], bmx, m0x, bmy, m0y );\
    COPY3_IF_LT( bcost, costs[1], bmx, m1x, bmy, m1y );\
    COPY3_IF_LT( bcost, costs[2], bmx, m2x, bmy, m2y );\
}
#define DIA1_ITER( mx, my )\
{\
    omx = mx; omy = my;\
    COST_MV_X4( 0,-1, 0,1, -1,0, 1,0 );\
}
#define CROSS( start, x_max, y_max )\
{\
    int i = start;\
    if( (x_max) <= X264_MIN(mv_x_max-omx, omx-mv_x_min) )\
        for( ; i < (x_max)-2; i+=4 )\
            COST_MV_X4( i,0, -i,0, i+2,0, -i-2,0 );\
    for( ; i < (x_max); i+=2 )\
    {\
        if( omx+i <= mv_x_max )\
            COST_MV( omx+i, omy );\
        if( omx-i >= mv_x_min )\
            COST_MV( omx-i, omy );\
    }\
    i = start;\
    if( (y_max) <= X264_MIN(mv_y_max-omy, omy-mv_y_min) )\
        for( ; i < (y_max)-2; i+=4 )\
            COST_MV_X4( 0,i, 0,-i, 0,i+2, 0,-i-2 );\
    for( ; i < (y_max); i+=2 )\
    {\
        if( omy+i <= mv_y_max )\
            COST_MV( omx, omy+i );\
        if( omy-i >= mv_y_min )\
            COST_MV( omx, omy-i );\
    }\
}
void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    const int stride = m->i_stride[0];
    int i_me_range = h->param.analyse.i_me_range;
    int bmx, bmy, bcost;
    int bpred_mx = 0, bpred_my = 0, bpred_cost = COST_MAX;
    int omx, omy, pmx, pmy;
    pixel *p_fenc = m->p_fenc[0];
    pixel *p_fref_w = m->p_fref_w;
    ALIGNED_ARRAY_16( pixel, pix,[16*16] );

    int costs[16];
    int mv_x_min = h->mb.mv_min_fpel[0];
    int mv_y_min = h->mb.mv_min_fpel[1];
    int mv_x_max = h->mb.mv_max_fpel[0];
    int mv_y_max = h->mb.mv_max_fpel[1];
    int mv_x_min_qpel = mv_x_min << 2;
    int mv_y_min_qpel = mv_y_min << 2;
    int mv_x_max_qpel = mv_x_max << 2;
    int mv_y_max_qpel = mv_y_max << 2;
/* Special version of pack to allow shortcuts in CHECK_MVRANGE */
#define pack16to32_mask2(mx,my) ((mx<<16)|(my&0x7FFF))
    uint32_t mv_min = pack16to32_mask2( -mv_x_min, -mv_y_min );
    uint32_t mv_max = pack16to32_mask2( mv_x_max, mv_y_max )|0x8000;
#define CHECK_MVRANGE(mx,my) (!(((pack16to32_mask2(mx,my) + mv_min) | (mv_max - pack16to32_mask2(mx,my))) & 0x80004000))
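
/* Illustrative example (not from the original source): packing (mx,my) into
 * one word turns four range comparisons into two adds and one mask.  With
 * mv_x_min = -32, a candidate mx = -40 makes the high field of
 * (pack16to32_mask2(mx,my) + mv_min) equal to (-40+32)<<16, i.e. negative, so
 * bit 31 trips; bit 14 plays the same role for my, and (mv_max - packed)
 * catches the upper bounds.  The |0x8000 on mv_max keeps the y subtraction
 * from borrowing into the x field. */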
    const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];

    uint32_t pmv;
    bmx = x264_clip3( m->mvp[0], mv_x_min_qpel, mv_x_max_qpel );
    bmy = x264_clip3( m->mvp[1], mv_y_min_qpel, mv_y_max_qpel );
    pmx = ( bmx + 2 ) >> 2;
    pmy = ( bmy + 2 ) >> 2;
    bcost = COST_MAX;
    /* try extra predictors if provided */
    if( h->mb.i_subpel_refine >= 3 )
    {
        pmv = pack16to32_mask(bmx,bmy);
        COST_MV_HPEL( bmx, bmy );
        for( int i = 0; i < i_mvc; i++ )
        {
            if( M32( mvc[i] ) && (pmv != M32( mvc[i] )) )
            {
                int mx = x264_clip3( mvc[i][0], mv_x_min_qpel, mv_x_max_qpel );
                int my = x264_clip3( mvc[i][1], mv_y_min_qpel, mv_y_max_qpel );
                COST_MV_HPEL( mx, my );
            }
        }
        bmx = ( bpred_mx + 2 ) >> 2;
        bmy = ( bpred_my + 2 ) >> 2;
        bcost = bpred_cost;
    }
    else
    {
        /* check the MVP */
        bmx = pmx;
        bmy = pmy;
        /* Because we are rounding the predicted motion vector to fullpel, there will be
         * an extra MV cost in 15 out of 16 cases.  However, when the predicted MV is
         * chosen as the best predictor, it is often the case that the subpel search will
         * result in a vector at or next to the predicted motion vector.  Therefore, it is
         * sensible to omit the cost of the MV from the rounded MVP to avoid unfairly
         * biasing against use of the predicted motion vector. */
        bcost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[bmy*stride+bmx], stride );
        pmv = pack16to32_mask( bmx, bmy );
        if( i_mvc > 0 )
        {
            ALIGNED_ARRAY_8( int16_t, mvc_fpel,[16],[2] );
            x264_predictor_roundclip( mvc_fpel, mvc, i_mvc, mv_x_min, mv_x_max, mv_y_min, mv_y_max );
            bcost <<= 4;
            for( int i = 1; i <= i_mvc; i++ )
            {
                if( M32( mvc_fpel[i-1] ) && (pmv != M32( mvc[i-1] )) )
                {
                    int mx = mvc_fpel[i-1][0];
                    int my = mvc_fpel[i-1][1];
                    int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[my*stride+mx], stride ) + BITS_MVD( mx, my );
                    cost = (cost << 4) + i;
                    COPY1_IF_LT( bcost, cost );
                }
            }
            if( bcost&15 )
            {
                bmx = mvc_fpel[(bcost&15)-1][0];
                bmy = mvc_fpel[(bcost&15)-1][1];
            }
            bcost >>= 4;
        }
        COST_MV( bmx, bmy );
    }
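
    /* Illustrative note (not from the original source): the loop above packs
     * each candidate's index into the low 4 bits of its cost ((cost<<4)+i),
     * so one scalar min via COPY1_IF_LT tracks best cost and best candidate
     * together; bcost&15 recovers the index (0 means no mvc beat the starting
     * point) and bcost>>=4 restores the true cost. */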

    switch( h->mb.i_me_method )
    {
        case X264_ME_DIA:
        {
            /* diamond search, radius 1 */
            bcost <<= 4;
            int i = i_me_range;
            do
            {
                COST_MV_X4_DIR( 0,-1, 0,1, -1,0, 1,0, costs );
                COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
                COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
                COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
                COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
                if( !(bcost&15) )
                    break;
                bmx -= (bcost<<28)>>30;
                bmy -= (bcost<<30)>>30;
                bcost &= ~15;
            } while( --i && CHECK_MVRANGE(bmx, bmy) );
            bcost >>= 4;
            break;
        }
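
        /* Illustrative note (not from the original source): the +1/+3/+4/+12
         * tags above store (-dx,-dy) of each diamond point as two 2-bit
         * two's-complement fields in the low bits of bcost, so the arithmetic
         * shifts (bcost<<28)>>30 and (bcost<<30)>>30 recenter the diamond on
         * the winner without a branch or a lookup table. */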

        case X264_ME_HEX:
        {
    me_hex2:
            /* hexagon search, radius 2 */
#if 0
            for( int i = 0; i < i_me_range/2; i++ )
            {
                omx = bmx; omy = bmy;
                COST_MV( omx-2, omy );
                COST_MV( omx-1, omy+2 );
                COST_MV( omx+1, omy+2 );
                COST_MV( omx+2, omy );
                COST_MV( omx+1, omy-2 );
                COST_MV( omx-1, omy-2 );
                if( bmx == omx && bmy == omy )
                    break;
                if( !CHECK_MVRANGE(bmx, bmy) )
                    break;
            }
#else
            /* equivalent to the above, but eliminates duplicate candidates */

            /* hexagon */
            COST_MV_X3_DIR( -2,0, -1, 2,  1, 2, costs   );
            COST_MV_X3_DIR(  2,0,  1,-2, -1,-2, costs+3 );
            bcost <<= 3;
            COPY1_IF_LT( bcost, (costs[0]<<3)+2 );
            COPY1_IF_LT( bcost, (costs[1]<<3)+3 );
            COPY1_IF_LT( bcost, (costs[2]<<3)+4 );
            COPY1_IF_LT( bcost, (costs[3]<<3)+5 );
            COPY1_IF_LT( bcost, (costs[4]<<3)+6 );
            COPY1_IF_LT( bcost, (costs[5]<<3)+7 );

            if( bcost&7 )
            {
                int dir = (bcost&7)-2;
                bmx += hex2[dir+1][0];
                bmy += hex2[dir+1][1];
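
                /* Illustrative note (not from the original source): the 3-bit
                 * tags above map the six hexagon points to hex2[1..6], so
                 * (bcost&7)-2 recovers the winning direction while
                 * bcost&7 == 0 still means "center is best". */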

                /* half hexagon, not overlapping the previous iteration */
                for( int i = (i_me_range>>1) - 1; i > 0 && CHECK_MVRANGE(bmx, bmy); i-- )
                {
                    COST_MV_X3_DIR( hex2[dir+0][0], hex2[dir+0][1],
                                    hex2[dir+1][0], hex2[dir+1][1],
                                    hex2[dir+2][0], hex2[dir+2][1],
                                    costs );
                    bcost &= ~7;
                    COPY1_IF_LT( bcost, (costs[0]<<3)+1 );
                    COPY1_IF_LT( bcost, (costs[1]<<3)+2 );
                    COPY1_IF_LT( bcost, (costs[2]<<3)+3 );
                    if( !(bcost&7) )
                        break;
                    dir += (bcost&7)-2;
                    dir = mod6m1[dir+1];
                    bmx += hex2[dir+1][0];
                    bmy += hex2[dir+1][1];
                }
            }
            bcost >>= 3;
#endif

            /* square refine */
            int dir = 0;
            COST_MV_X4_DIR(  0,-1,  0,1, -1,0, 1,0, costs );
            COPY2_IF_LT( bcost, costs[0], dir, 1 );
            COPY2_IF_LT( bcost, costs[1], dir, 2 );
            COPY2_IF_LT( bcost, costs[2], dir, 3 );
            COPY2_IF_LT( bcost, costs[3], dir, 4 );
            COST_MV_X4_DIR( -1,-1, -1,1, 1,-1, 1,1, costs );
            COPY2_IF_LT( bcost, costs[0], dir, 5 );
            COPY2_IF_LT( bcost, costs[1], dir, 6 );
            COPY2_IF_LT( bcost, costs[2], dir, 7 );
            COPY2_IF_LT( bcost, costs[3], dir, 8 );
            bmx += square1[dir][0];
            bmy += square1[dir][1];
            break;
        }

        case X264_ME_UMH:
        {
            /* Uneven-cross Multi-Hexagon-grid Search
             * as in JM, except with different early termination */

            static const uint8_t x264_pixel_size_shift[7] = { 0, 1, 1, 2, 3, 3, 4 };

            int ucost1, ucost2;
            int cross_start = 1;

            /* refine predictors */
            ucost1 = bcost;
            DIA1_ITER( pmx, pmy );
            if( pmx | pmy )
                DIA1_ITER( 0, 0 );

            if( i_pixel == PIXEL_4x4 )
                goto me_hex2;

            ucost2 = bcost;
            if( (bmx | bmy) && ((bmx-pmx) | (bmy-pmy)) )
                DIA1_ITER( bmx, bmy );
            if( bcost == ucost2 )
                cross_start = 3;
            omx = bmx; omy = bmy;

            /* early termination */
#define SAD_THRESH(v) ( bcost < ( v >> x264_pixel_size_shift[i_pixel] ) )
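            /* Illustrative note (not from the original source): the thresholds
             * below are tuned for 16x16 SADs; x264_pixel_size_shift is
             * log2(256/area) per partition, so SAD_THRESH(2000) tests 2000 for
             * 16x16, 1000 for 16x8/8x16, 500 for 8x8 and 125 for 4x4. */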
            if( bcost == ucost2 && SAD_THRESH(2000) )
            {
                COST_MV_X4( 0,-2, -1,-1, 1,-1, -2,0 );
                COST_MV_X4( 2, 0, -1, 1, 1, 1,  0,2 );
                if( bcost == ucost1 && SAD_THRESH(500) )
                    break;
                if( bcost == ucost2 )
                {
                    int range = (i_me_range>>1) | 1;
                    CROSS( 3, range, range );
                    COST_MV_X4( -1,-2, 1,-2, -2,-1, 2,-1 );
                    COST_MV_X4( -2, 1, 2, 1, -1, 2, 1, 2 );
                    if( bcost == ucost2 )
                        break;
                    cross_start = range + 2;
                }
            }

            /* adaptive search range */
            if( i_mvc )
            {
                /* range multipliers based on casual inspection of some statistics of
                 * average distance between current predictor and final mv found by ESA.
                 * these have not been tuned much by actual encoding. */
                static const uint8_t range_mul[4][4] =
                {
                    { 3, 3, 4, 4 },
                    { 3, 4, 4, 4 },
                    { 4, 4, 4, 5 },
                    { 4, 4, 5, 6 },
                };
                int mvd;
                int sad_ctx, mvd_ctx;
                int denom = 1;

                if( i_mvc == 1 )
                {
                    if( i_pixel == PIXEL_16x16 )
                        /* mvc is probably the same as mvp, so the difference isn't meaningful.
                         * but prediction usually isn't too bad, so just use medium range */
                        mvd = 25;
                    else
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                }
                else
                {
                    /* calculate the degree of agreement between predictors. */
                    /* in 16x16, mvc includes all the neighbors used to make mvp,
                     * so don't count mvp separately. */
                    denom = i_mvc - 1;
                    mvd = 0;
                    if( i_pixel != PIXEL_16x16 )
                    {
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                        denom++;
                    }
                    mvd += x264_predictor_difference( mvc, i_mvc );
                }

                sad_ctx = SAD_THRESH(1000) ? 0
                        : SAD_THRESH(2000) ? 1
                        : SAD_THRESH(4000) ? 2 : 3;
                mvd_ctx = mvd < 10*denom ? 0
                        : mvd < 20*denom ? 1
                        : mvd < 40*denom ? 2 : 3;

                i_me_range = i_me_range * range_mul[mvd_ctx][sad_ctx] >> 2;
            }

            /* FIXME if the above DIA2/OCT2/CROSS found a new mv, it has not updated omx/omy.
             * we are still centered on the same place as the DIA2. is this desirable? */
            CROSS( cross_start, i_me_range, i_me_range>>1 );

            COST_MV_X4( -2,-2, -2,2, 2,-2, 2,2 );

            /* hexagon grid */
            omx = bmx; omy = bmy;
            const uint16_t *p_cost_omvx = p_cost_mvx + omx*4;
            const uint16_t *p_cost_omvy = p_cost_mvy + omy*4;
            int i = 1;
            do
            {
                static const int8_t hex4[16][2] = {
                    { 0,-4}, { 0, 4}, {-2,-3}, { 2,-3},
                    {-4,-2}, { 4,-2}, {-4,-1}, { 4,-1},
                    {-4, 0}, { 4, 0}, {-4, 1}, { 4, 1},
                    {-4, 2}, { 4, 2}, {-2, 3}, { 2, 3},
                };

                if( 4*i > X264_MIN4( mv_x_max-omx, omx-mv_x_min,
                                     mv_y_max-omy, omy-mv_y_min ) )
                {
                    for( int j = 0; j < 16; j++ )
                    {
                        int mx = omx + hex4[j][0]*i;
                        int my = omy + hex4[j][1]*i;
                        if( CHECK_MVRANGE(mx, my) )
                            COST_MV( mx, my );
                    }
                }
                else
                {
                    int dir = 0;
                    pixel *pix_base = p_fref_w + omx + (omy-4*i)*stride;
                    int dy = i*stride;
#define SADS(k,x0,y0,x1,y1,x2,y2,x3,y3)\
                    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
                        pix_base x0*i+(y0-2*k+4)*dy,\
                        pix_base x1*i+(y1-2*k+4)*dy,\
                        pix_base x2*i+(y2-2*k+4)*dy,\
                        pix_base x3*i+(y3-2*k+4)*dy,\
                        stride, costs+4*k );
#define ADD_MVCOST(k,x,y) costs[k] += p_cost_omvx[x*4*i] + p_cost_omvy[y*4*i]
#define MIN_MV(k,x,y) COPY2_IF_LT( bcost, costs[k], dir, x*16+(y&15) )
                    SADS( 0, +0,-4, +0,+4, -2,-3, +2,-3 );
                    SADS( 1, -4,-2, +4,-2, -4,-1, +4,-1 );
                    SADS( 2, -4,+0, +4,+0, -4,+1, +4,+1 );
                    SADS( 3, -4,+2, +4,+2, -2,+3, +2,+3 );
                    ADD_MVCOST(  0, 0,-4 );
                    ADD_MVCOST(  1, 0, 4 );
                    ADD_MVCOST(  2,-2,-3 );
                    ADD_MVCOST(  3, 2,-3 );
                    ADD_MVCOST(  4,-4,-2 );
                    ADD_MVCOST(  5, 4,-2 );
                    ADD_MVCOST(  6,-4,-1 );
                    ADD_MVCOST(  7, 4,-1 );
                    ADD_MVCOST(  8,-4, 0 );
                    ADD_MVCOST(  9, 4, 0 );
                    ADD_MVCOST( 10,-4, 1 );
                    ADD_MVCOST( 11, 4, 1 );
                    ADD_MVCOST( 12,-4, 2 );
                    ADD_MVCOST( 13, 4, 2 );
                    ADD_MVCOST( 14,-2, 3 );
                    ADD_MVCOST( 15, 2, 3 );
                    MIN_MV(  0, 0,-4 );
                    MIN_MV(  1, 0, 4 );
                    MIN_MV(  2,-2,-3 );
                    MIN_MV(  3, 2,-3 );
                    MIN_MV(  4,-4,-2 );
                    MIN_MV(  5, 4,-2 );
                    MIN_MV(  6,-4,-1 );
                    MIN_MV(  7, 4,-1 );
                    MIN_MV(  8,-4, 0 );
                    MIN_MV(  9, 4, 0 );
                    MIN_MV( 10,-4, 1 );
                    MIN_MV( 11, 4, 1 );
                    MIN_MV( 12,-4, 2 );
                    MIN_MV( 13, 4, 2 );
                    MIN_MV( 14,-2, 3 );
                    MIN_MV( 15, 2, 3 );
#undef SADS
#undef ADD_MVCOST
#undef MIN_MV
                    if( !dir )
                        break;
                    bmx = omx + i*(dir>>4);
                    bmy = omy + i*((dir<<28)>>28);
                }
            } while( ++i <= i_me_range>>2 );
            if( bmy <= mv_y_max && bmy >= mv_y_min && bmx <= mv_x_max && bmx >= mv_x_min )
                goto me_hex2;
            break;
        }

        case X264_ME_ESA:
        case X264_ME_TESA:
        {
            const int min_x = X264_MAX( bmx - i_me_range, mv_x_min );
            const int min_y = X264_MAX( bmy - i_me_range, mv_y_min );
            const int max_x = X264_MIN( bmx + i_me_range, mv_x_max );
            const int max_y = X264_MIN( bmy + i_me_range, mv_y_max );
            /* SEA is fastest in multiples of 4 */
            const int width = (max_x - min_x + 3) & ~3;
#if 0
            /* plain old exhaustive search */
            for( int my = min_y; my <= max_y; my++ )
                for( int mx = min_x; mx <= max_x; mx++ )
                    COST_MV( mx, my );
#else
            /* successive elimination by comparing DC before a full SAD,
             * because sum(abs(diff)) >= abs(diff(sum)). */
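            /* Illustrative example (not from the original source): per pixel,
             * |a| + |b| >= |a + b|, so summing over a block gives
             * sad(enc,ref) >= |sum(enc) - sum(ref)|.  If the best cost so far
             * is 60, any candidate whose block sum differs from the encoded
             * block's sum by more than 60 can be rejected on that subtraction
             * alone; the pixf.ads calls below apply this test to whole rows
             * of candidates at once. */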
            uint16_t *sums_base = m->integral;
            /* due to a GCC bug on some platforms (win32?), zero[] may not actually be aligned.
             * this is not a problem because it is not used for any SSE instructions. */
            ALIGNED_16( static pixel zero[8*FENC_STRIDE] );
            ALIGNED_ARRAY_16( int, enc_dc,[4] );
            int sad_size = i_pixel <= PIXEL_8x8 ? PIXEL_8x8 : PIXEL_4x4;
            int delta = x264_pixel_size[sad_size].w;
            int16_t *xs = h->scratch_buffer;
            int xn;
            uint16_t *cost_fpel_mvx = h->cost_mv_fpel[x264_lambda_tab[h->mb.i_qp]][-m->mvp[0]&3] + (-m->mvp[0]>>2);
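            /* Illustrative note (not from the original source): cost_mv_fpel[]
             * stores mv bit costs sampled at fullpel steps, one table per qpel
             * phase of the predictor; selecting phase (-mvp)&3 and offsetting
             * by (-mvp)>>2 makes cost_fpel_mvx[mx] equal to p_cost_mvx[mx<<2]
             * with a single indexed load in the inner loops. */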
            h->pixf.sad_x4[sad_size]( zero, p_fenc, p_fenc+delta,
                                      p_fenc+delta*FENC_STRIDE, p_fenc+delta+delta*FENC_STRIDE,
                                      FENC_STRIDE, enc_dc );
            if( delta == 4 )
                sums_base += stride * (h->fenc->i_lines[0] + PADV*2);
            if( i_pixel == PIXEL_16x16 || i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                delta *= stride;
            if( i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                enc_dc[1] = enc_dc[2];

            if( h->mb.i_me_method == X264_ME_TESA )
            {
                // ADS threshold, then SAD threshold, then keep the best few SADs, then SATD
                mvsad_t *mvsads = (mvsad_t *)(xs + ((width+15)&~15) + 4);
                int nmvsad = 0, limit;
                int sad_thresh = i_me_range <= 16 ? 10 : i_me_range <= 24 ? 11 : 12;
                int bsad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+bmy*stride+bmx, stride )
                         + BITS_MVD( bmx, bmy );
                for( int my = min_y; my <= max_y; my++ )
                {
                    int i;
                    int ycost = p_cost_mvy[my<<2];
                    if( bsad <= ycost )
                        continue;
                    bsad -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bsad * 17 >> 4 );
                    for( i = 0; i < xn-2; i += 3 )
                    {
                        pixel *ref = p_fref_w+min_x+my*stride;
                        int sads[3];
                        h->pixf.sad_x3[i_pixel]( p_fenc, ref+xs[i], ref+xs[i+1], ref+xs[i+2], stride, sads );
                        for( int j = 0; j < 3; j++ )
                        {
                            int sad = sads[j] + cost_fpel_mvx[xs[i+j]];
                            if( sad < bsad*sad_thresh>>3 )
                            {
                                COPY1_IF_LT( bsad, sad );
                                mvsads[nmvsad].sad = sad + ycost;
                                mvsads[nmvsad].mv[0] = min_x+xs[i+j];
                                mvsads[nmvsad].mv[1] = my;
                                nmvsad++;
                            }
                        }
                    }
                    for( ; i < xn; i++ )
                    {
                        int mx = min_x+xs[i];
                        int sad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+mx+my*stride, stride )
                                + cost_fpel_mvx[xs[i]];
                        if( sad < bsad*sad_thresh>>3 )
                        {
                            COPY1_IF_LT( bsad, sad );
                            mvsads[nmvsad].sad = sad + ycost;
                            mvsads[nmvsad].mv[0] = mx;
                            mvsads[nmvsad].mv[1] = my;
                            nmvsad++;
                        }
                    }
                    bsad += ycost;
                }

                limit = i_me_range >> 1;
                sad_thresh = bsad*sad_thresh>>3;
                while( nmvsad > limit*2 && sad_thresh > bsad )
                {
                    int i;
                    // halve the range if the domain is too large... eh, close enough
                    sad_thresh = (sad_thresh + bsad) >> 1;
                    for( i = 0; i < nmvsad && mvsads[i].sad <= sad_thresh; i++ );
                    for( int j = i; j < nmvsad; j++ )
                    {
                        uint32_t sad;
                        if( WORD_SIZE == 8 && sizeof(mvsad_t) == 8 )
                        {
                            uint64_t mvsad = M64( &mvsads[i] ) = M64( &mvsads[j] );
#if WORDS_BIGENDIAN
                            mvsad >>= 32;
#endif
                            sad = mvsad;
                        }
                        else
                        {
                            sad = mvsads[j].sad;
                            CP32( mvsads[i].mv, mvsads[j].mv );
                            mvsads[i].sad = sad;
                        }
                        i += (sad - (sad_thresh+1)) >> 31;
                    }
                    nmvsad = i;
                }
                while( nmvsad > limit )
                {
                    int bi = 0;
                    for( int i = 1; i < nmvsad; i++ )
                        if( mvsads[i].sad > mvsads[bi].sad )
                            bi = i;
                    nmvsad--;
                    if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
                        CP64( &mvsads[bi], &mvsads[nmvsad] );
                    else
                        mvsads[bi] = mvsads[nmvsad];
                }
                for( int i = 0; i < nmvsad; i++ )
                    COST_MV( mvsads[i].mv[0], mvsads[i].mv[1] );
            }
            else
            {
                // just ADS and SAD
                for( int my = min_y; my <= max_y; my++ )
                {
                    int i;
                    int ycost = p_cost_mvy[my<<2];
                    if( bcost <= ycost )
                        continue;
                    bcost -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bcost );
                    for( i = 0; i < xn-2; i += 3 )
                        COST_MV_X3_ABS( min_x+xs[i],my, min_x+xs[i+1],my, min_x+xs[i+2],my );
                    for( ; i < xn; i++ )
                        COST_MV( min_x+xs[i], my );
                    bcost += ycost;
                }
            }
#endif
            break;
        }
    }

    /* -> qpel mv */
    if( bpred_cost < bcost )
    {
        m->mv[0] = bpred_mx;
        m->mv[1] = bpred_my;
        m->cost = bpred_cost;
    }
    else
    {
        m->mv[0] = bmx << 2;
        m->mv[1] = bmy << 2;
        m->cost = bcost;
    }

    /* compute the real cost */
    m->cost_mv = p_cost_mvx[ m->mv[0] ] + p_cost_mvy[ m->mv[1] ];
    if( bmx == pmx && bmy == pmy && h->mb.i_subpel_refine < 3 )
        m->cost += m->cost_mv;

    /* subpel refine */
    if( h->mb.i_subpel_refine >= 2 )
    {
        int hpel = subpel_iterations[h->mb.i_subpel_refine][2];
        int qpel = subpel_iterations[h->mb.i_subpel_refine][3];
        refine_subpel( h, m, hpel, qpel, p_halfpel_thresh, 0 );
    }
}

void x264_me_refine_qpel( x264_t *h, x264_me_t *m )
{
    int hpel = subpel_iterations[h->mb.i_subpel_refine][0];
    int qpel = subpel_iterations[h->mb.i_subpel_refine][1];

    if( m->i_pixel <= PIXEL_8x8 )
        m->cost -= m->i_ref_cost;

    refine_subpel( h, m, hpel, qpel, NULL, 1 );
}

void x264_me_refine_qpel_refdupe( x264_t *h, x264_me_t *m, int *p_halfpel_thresh )
{
    refine_subpel( h, m, 0, X264_MIN( 2, subpel_iterations[h->mb.i_subpel_refine][3] ), p_halfpel_thresh, 0 );
}

#define COST_MV_SAD( mx, my ) \
{ \
    int stride = 16; \
    pixel *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my ); \
}

#define COST_MV_SATD( mx, my, dir ) \
if( b_refine_qpel || (dir^1) != odir ) \
{ \
    int stride = 16; \
    pixel *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    if( b_chroma_me && cost < bcost ) \
    { \
        h->mc.mc_chroma( pix, pix+8, 16, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 ); \
        if( m->weight[1].weightfn ) \
            m->weight[1].weightfn[x264_pixel_size[i_pixel].w>>3]( pix, 16, pix, 16, \
                                                                  &m->weight[1], x264_pixel_size[i_pixel].h>>1 ); \
        cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[1], FENC_STRIDE, pix, 16 ); \
        if( cost < bcost ) \
        { \
            if( m->weight[2].weightfn ) \
                m->weight[2].weightfn[x264_pixel_size[i_pixel].w>>3]( pix+8, 16, pix+8, 16, \
                                                                      &m->weight[2], x264_pixel_size[i_pixel].h>>1 ); \
            cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[2], FENC_STRIDE, pix+8, 16 ); \
        } \
    } \
    COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, bdir, dir ); \
}

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
    const int i_pixel = m->i_pixel;
    const int b_chroma_me = h->mb.b_chroma_me && i_pixel <= PIXEL_8x8;
    const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;

    ALIGNED_ARRAY_16( pixel, pix,[64*18] ); // really 17x17x2, but round up for alignment
    int costs[4];

    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int bcost = m->cost;
    int odir = -1, bdir;

    /* try the subpel component of the predicted mv */
    if( hpel_iters && h->mb.i_subpel_refine < 3 )
    {
        int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0]+2, h->mb.mv_max_spel[0]-2 );
        int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1]+2, h->mb.mv_max_spel[1]-2 );
        if( (mx-bmx)|(my-bmy) )
            COST_MV_SAD( mx, my );
    }

    /* halfpel diamond search */
    for( int i = hpel_iters; i > 0; i-- )
    {
        int omx = bmx, omy = bmy;
        int stride = 64; // candidates are either all hpel or all qpel, so one stride is enough
        pixel *src0, *src1, *src2, *src3;
        src0 = h->mc.get_ref( pix,    &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1, &m->weight[0] );
        src2 = h->mc.get_ref( pix+32, &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh, &m->weight[0] );
        src1 = src0 + stride;
        src3 = src2 + 1;
        h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
        COPY2_IF_LT( bcost, costs[0] + p_cost_mvx[omx  ] + p_cost_mvy[omy-2], bmy, omy-2 );
        COPY2_IF_LT( bcost, costs[1] + p_cost_mvx[omx  ] + p_cost_mvy[omy+2], bmy, omy+2 );
        COPY3_IF_LT( bcost, costs[2] + p_cost_mvx[omx-2] + p_cost_mvy[omy  ], bmx, omx-2, bmy, omy );
        COPY3_IF_LT( bcost, costs[3] + p_cost_mvx[omx+2] + p_cost_mvy[omy  ], bmx, omx+2, bmy, omy );
        if( (bmx == omx) & (bmy == omy) )
            break;
    }

    if( !b_refine_qpel && (h->pixf.mbcmp_unaligned[0] != h->pixf.fpelcmp[0] || b_chroma_me) )
    {
        bcost = COST_MAX;
        COST_MV_SATD( bmx, bmy, -1 );
    }

    /* early termination when examining multiple reference frames */
    if( p_halfpel_thresh )
    {
        if( (bcost*7)>>3 > *p_halfpel_thresh )
        {
            m->cost = bcost;
            m->mv[0] = bmx;
            m->mv[1] = bmy;
            // don't need cost_mv
            return;
        }
        else if( bcost < *p_halfpel_thresh )
            *p_halfpel_thresh = bcost;
    }

    /* quarterpel diamond search */
    if( h->mb.i_subpel_refine != 1 )
    {
        bdir = -1;
        for( int i = qpel_iters; i > 0; i-- )
        {
            if( bmy <= h->mb.mv_min_spel[1] || bmy >= h->mb.mv_max_spel[1] || bmx <= h->mb.mv_min_spel[0] || bmx >= h->mb.mv_max_spel[0] )
                break;
            odir = bdir;
            int omx = bmx, omy = bmy;
            COST_MV_SATD( omx, omy - 1, 0 );
            COST_MV_SATD( omx, omy + 1, 1 );
            COST_MV_SATD( omx - 1, omy, 2 );
            COST_MV_SATD( omx + 1, omy, 3 );
            if( (bmx == omx) & (bmy == omy) )
                break;
        }
    }
    /* Special simplified case for subme=1 */
    else if( bmy > h->mb.mv_min_spel[1] && bmy < h->mb.mv_max_spel[1] && bmx > h->mb.mv_min_spel[0] && bmx < h->mb.mv_max_spel[0] )
    {
        int omx = bmx, omy = bmy;
        /* We have to use mc_luma because all strides must be the same to use fpelcmp_x4 */
        h->mc.mc_luma( pix   , 64, m->p_fref, m->i_stride[0], omx, omy-1, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+16, 64, m->p_fref, m->i_stride[0], omx, omy+1, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+32, 64, m->p_fref, m->i_stride[0], omx-1, omy, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+48, 64, m->p_fref, m->i_stride[0], omx+1, omy, bw, bh, &m->weight[0] );
        h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], pix, pix+16, pix+32, pix+48, 64, costs );
        COPY2_IF_LT( bcost, costs[0] + p_cost_mvx[omx  ] + p_cost_mvy[omy-1], bmy, omy-1 );
        COPY2_IF_LT( bcost, costs[1] + p_cost_mvx[omx  ] + p_cost_mvy[omy+1], bmy, omy+1 );
        COPY3_IF_LT( bcost, costs[2] + p_cost_mvx[omx-1] + p_cost_mvy[omy  ], bmx, omx-1, bmy, omy );
        COPY3_IF_LT( bcost, costs[3] + p_cost_mvx[omx+1] + p_cost_mvy[omy  ], bmx, omx+1, bmy, omy );
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    m->cost_mv = p_cost_mvx[bmx] + p_cost_mvy[bmy];
}

#define BIME_CACHE( dx, dy, list )\
{\
    x264_me_t *m = m##list;\
    int i = 4 + 3*dx + dy;\
    int mvx = bm##list##x+dx;\
    int mvy = bm##list##y+dy;\
    stride[list][i] = bw;\
    src[list][i] = h->mc.get_ref( pixy_buf[list][i], &stride[list][i], m->p_fref, m->i_stride[0], mvx, mvy, bw, bh, weight_none );\
    if( rd )\
        h->mc.mc_chroma( pixu_buf[list][i], pixv_buf[list][i], 8, m->p_fref[4], m->i_stride[1], mvx, mvy + mv##list##y_offset, bw>>1, bh>>1 );\
}

#define SATD_THRESH 17/16
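
/* Illustrative note (not from the original source): SATD_THRESH works by
 * textual expansion: "cost < bcost * SATD_THRESH" becomes cost < bcost*17/16,
 * i.e. accept anything within ~6% of the best SATD; evaluated on its own,
 * the integer expression 17/16 would just be 1. */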

/* Don't unroll the BIME_CACHE loop. I couldn't find any way to force this
 * other than making its iteration count not a compile-time constant. */
int x264_iter_kludge = 0;

static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2, int rd )
{
    int x = i8&1;
    int y = i8>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    int16_t *cache0_mv = h->mb.cache.mv[0][s8];
    int16_t *cache1_mv = h->mb.cache.mv[1][s8];
    const int i_pixel = m0->i_pixel;
    const int bw = x264_pixel_size[i_pixel].w;
    const int bh = x264_pixel_size[i_pixel].h;
    ALIGNED_ARRAY_16( pixel, pixy_buf,[2],[9][16*16] );
    ALIGNED_ARRAY_8( pixel, pixu_buf,[2],[9][8*8] );
    ALIGNED_ARRAY_8( pixel, pixv_buf,[2],[9][8*8] );
    pixel *src[2][9];
    pixel *pix  = &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE];
    pixel *pixu = &h->mb.pic.p_fdec[1][4*x + 4*y*FDEC_STRIDE];
    pixel *pixv = &h->mb.pic.p_fdec[2][4*x + 4*y*FDEC_STRIDE];
    int ref0 = h->mb.cache.ref[0][s8];
    int ref1 = h->mb.cache.ref[1][s8];
    const int mv0y_offset = h->mb.b_interlaced & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    const int mv1y_offset = h->mb.b_interlaced & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    int stride[2][9];
    int bm0x = m0->mv[0];
    int bm0y = m0->mv[1];
    int bm1x = m1->mv[0];
    int bm1y = m1->mv[1];
    int bcost = COST_MAX;
    int mc_list0 = 1, mc_list1 = 1;
    uint64_t bcostrd = COST_MAX64;
    uint16_t amvd;
    /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
    ALIGNED_ARRAY_16( uint8_t, visited,[8],[8][8] );
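    /* Illustrative note (not from the original source): a full visited array
     * over (m0x,m0y,m1x,m1y) mod 8 would be uint8_t[8][8][8][8]; packing the
     * last dimension into bits makes the test one load and one mask:
     *     visited[m0x&7][m0y&7][m1x&7] & (1 << (m1y&7))
     * mvs that differ by a multiple of 8 alias, but since each pass moves at
     * most one qpel step, at worst a nearby candidate is skipped, never an
     * incorrect result produced. */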
    /* all permutations of an offset in up to 2 of the 4 dimensions (1 zero + 8 single + 24 double = 33 entries) */
    static const int8_t dia4d[33][4] =
    {
        {0,0,0,0},
        {0,0,0,1}, {0,0,0,-1}, {0,0,1,0}, {0,0,-1,0},
        {0,1,0,0}, {0,-1,0,0}, {1,0,0,0}, {-1,0,0,0},
        {0,0,1,1}, {0,0,-1,-1},{0,1,1,0}, {0,-1,-1,0},
        {1,1,0,0}, {-1,-1,0,0},{1,0,0,1}, {-1,0,0,-1},
        {0,1,0,1}, {0,-1,0,-1},{1,0,1,0}, {-1,0,-1,0},
        {0,0,-1,1},{0,0,1,-1}, {0,-1,1,0},{0,1,-1,0},
        {-1,1,0,0},{1,-1,0,0}, {1,0,0,-1},{-1,0,0,1},
        {0,-1,0,1},{0,1,0,-1}, {-1,0,1,0},{1,0,-1,0},
    };

    if( bm0y < h->mb.mv_min_spel[1] + 8 || bm1y < h->mb.mv_min_spel[1] + 8 ||
        bm0y > h->mb.mv_max_spel[1] - 8 || bm1y > h->mb.mv_max_spel[1] - 8 ||
        bm0x < h->mb.mv_min_spel[0] + 8 || bm1x < h->mb.mv_min_spel[0] + 8 ||
        bm0x > h->mb.mv_max_spel[0] - 8 || bm1x > h->mb.mv_max_spel[0] - 8 )
        return;

    if( rd && m0->i_pixel != PIXEL_16x16 && i8 != 0 )
    {
        x264_mb_predict_mv( h, 0, i8<<2, bw>>2, m0->mvp );
        x264_mb_predict_mv( h, 1, i8<<2, bw>>2, m1->mvp );
    }

    const uint16_t *p_cost_m0x = m0->p_cost_mv - m0->mvp[0];
    const uint16_t *p_cost_m0y = m0->p_cost_mv - m0->mvp[1];
    const uint16_t *p_cost_m1x = m1->p_cost_mv - m1->mvp[0];
    const uint16_t *p_cost_m1y = m1->p_cost_mv - m1->mvp[1];

    h->mc.memzero_aligned( visited, sizeof(uint8_t[8][8][8]) );

    for( int pass = 0; pass < 8; pass++ )
    {
        int bestj = 0;
        /* check all mv pairs that differ in at most 2 components from the current mvs. */
        /* doesn't do chroma ME. this probably doesn't matter, as the gains
         * from bidir ME are the same with and without chroma ME. */

        if( mc_list0 )
            for( int j = x264_iter_kludge; j < 9; j++ )
                BIME_CACHE( square1[j][0], square1[j][1], 0 );

        if( mc_list1 )
            for( int j = x264_iter_kludge; j < 9; j++ )
                BIME_CACHE( square1[j][0], square1[j][1], 1 );

        for( int j = !!pass; j < 33; j++ )
        {
            int m0x = dia4d[j][0] + bm0x;
            int m0y = dia4d[j][1] + bm0y;
            int m1x = dia4d[j][2] + bm1x;
            int m1y = dia4d[j][3] + bm1y;
            if( !pass || !((visited[(m0x)&7][(m0y)&7][(m1x)&7] & (1<<((m1y)&7)))) )
            {
                int i0 = 4 + 3*dia4d[j][0] + dia4d[j][1];
                int i1 = 4 + 3*dia4d[j][2] + dia4d[j][3];
                visited[(m0x)&7][(m0y)&7][(m1x)&7] |= (1<<((m1y)&7));
                h->mc.avg[i_pixel]( pix, FDEC_STRIDE, src[0][i0], stride[0][i0], src[1][i1], stride[1][i1], i_weight );
                int cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE )
                         + p_cost_m0x[m0x] + p_cost_m0y[m0y] + p_cost_m1x[m1x] + p_cost_m1y[m1y];
                if( rd )
                {
                    if( cost < bcost * SATD_THRESH )
                    {
                        bcost = X264_MIN( cost, bcost );
                        M32( cache0_mv ) = pack16to32_mask(m0x,m0y);
                        M32( cache1_mv ) = pack16to32_mask(m1x,m1y);
                        h->mc.avg[i_pixel+3]( pixu, FDEC_STRIDE, pixu_buf[0][i0], 8, pixu_buf[1][i1], 8, i_weight );
                        h->mc.avg[i_pixel+3]( pixv, FDEC_STRIDE, pixv_buf[0][i0], 8, pixv_buf[1][i1], 8, i_weight );
                        uint64_t costrd = x264_rd_cost_part( h, i_lambda2, i8*4, m0->i_pixel );
                        COPY2_IF_LT( bcostrd, costrd, bestj, j );
                    }
                }
                else
                    COPY2_IF_LT( bcost, cost, bestj, j );
            }
        }

        if( !bestj )
            break;

        bm0x += dia4d[bestj][0];
        bm0y += dia4d[bestj][1];
        bm1x += dia4d[bestj][2];
        bm1y += dia4d[bestj][3];

        mc_list0 = M16( &dia4d[bestj][0] );
        mc_list1 = M16( &dia4d[bestj][2] );
    }

    if( rd )
    {
        x264_macroblock_cache_mv ( h, 2*x, 2*y, bw>>2, bh>>2, 0, pack16to32_mask(bm0x, bm0y) );
        amvd = pack8to16( X264_MIN(abs(bm0x - m0->mvp[0]),33), X264_MIN(abs(bm0y - m0->mvp[1]),33) );
        x264_macroblock_cache_mvd( h, 2*x, 2*y, bw>>2, bh>>2, 0, amvd );

        x264_macroblock_cache_mv ( h, 2*x, 2*y, bw>>2, bh>>2, 1, pack16to32_mask(bm1x, bm1y) );
        amvd = pack8to16( X264_MIN(abs(bm1x - m1->mvp[0]),33), X264_MIN(abs(bm1y - m1->mvp[1]),33) );
        x264_macroblock_cache_mvd( h, 2*x, 2*y, bw>>2, bh>>2, 1, amvd );
    }

    m0->mv[0] = bm0x;
    m0->mv[1] = bm0y;
    m1->mv[0] = bm1x;
    m1->mv[1] = bm1y;
}

void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
{
    x264_me_refine_bidir( h, m0, m1, i_weight, 0, 0, 0 );
}

void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 )
{
    /* Motion compensation is done as part of bidir_rd; don't repeat
     * it in encoding. */
    h->mb.b_skip_mc = 1;
    x264_me_refine_bidir( h, m0, m1, i_weight, i8, i_lambda2, 1 );
    h->mb.b_skip_mc = 0;
}

#undef COST_MV_SATD
#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
{ \
    if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
    { \
        h->mc.mc_luma( pix, FDEC_STRIDE, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
        dst = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE ) \
            + p_cost_mvx[mx] + p_cost_mvy[my]; \
        COPY1_IF_LT( bsatd, dst ); \
    } \
    else \
        dst = COST_MAX; \
}

#define COST_MV_RD( mx, my, satd, do_dir, mdir ) \
{ \
    if( satd <= bsatd * SATD_THRESH ) \
    { \
        uint64_t cost; \
        M32( cache_mv ) = pack16to32_mask(mx,my); \
        if( m->i_pixel <= PIXEL_8x8 ) \
            h->mc.mc_chroma( pixu, pixv, FDEC_STRIDE, m->p_fref[4], m->i_stride[1], mx, my + mvy_offset, bw>>1, bh>>1 ); \
        cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
        COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
    } \
}

void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
{
    int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
    const uint16_t *p_cost_mvx, *p_cost_mvy;
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    const int mvy_offset = h->mb.b_interlaced & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;

    uint64_t bcost = COST_MAX64;
    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int omx, omy, pmx, pmy;
    unsigned bsatd;
    int satd;
    int dir = -2;
    int i8 = i4>>2;
    uint16_t amvd;

    pixel *pix  = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    pixel *pixu = &h->mb.pic.p_fdec[1][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];
    pixel *pixv = &h->mb.pic.p_fdec[2][(i8>>1)*4*FDEC_STRIDE+(i8&1)*4];

    h->mb.b_skip_mc = 1;

    if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
        x264_mb_predict_mv( h, i_list, i4, bw>>2, m->mvp );
    pmx = m->mvp[0];
    pmy = m->mvp[1];
    p_cost_mvx = m->p_cost_mv - pmx;
    p_cost_mvy = m->p_cost_mv - pmy;
    COST_MV_SATD( bmx, bmy, bsatd, 0 );
    if( m->i_pixel != PIXEL_16x16 )
        COST_MV_RD( bmx, bmy, 0, 0, 0 )
    else
        bcost = m->cost;

    /* check the predicted mv */
    if( (bmx != pmx || bmy != pmy)
        && pmx >= h->mb.mv_min_spel[0] && pmx <= h->mb.mv_max_spel[0]
        && pmy >= h->mb.mv_min_spel[1] && pmy <= h->mb.mv_max_spel[1] )
    {
        COST_MV_SATD( pmx, pmy, satd, 0 );
        COST_MV_RD  ( pmx, pmy, satd, 0, 0 );
        /* The hex motion search is guaranteed to not repeat the center candidate,
         * so if pmv is chosen, set the "MV to avoid checking" to bmv instead. */
        if( bmx == pmx && bmy == pmy )
        {
            pmx = m->mv[0];
            pmy = m->mv[1];
        }
    }

    if( bmy < h->mb.mv_min_spel[1] + 3 || bmy > h->mb.mv_max_spel[1] - 3 ||
        bmx < h->mb.mv_min_spel[0] + 3 || bmx > h->mb.mv_max_spel[0] - 3 )
    {
        h->mb.b_skip_mc = 0;
        return;
    }

    /* subpel hex search, same pattern as ME HEX. */
    dir = -2;
    omx = bmx;
    omy = bmy;
    for( int j = 0; j < 6; j++ )
    {
        COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1 );
        COST_MV_RD  ( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1, j );
    }

    if( dir != -2 )
    {
        /* half hexagon, not overlapping the previous iteration */
        for( int i = 1; i < 10; i++ )
        {
            const int odir = mod6m1[dir+1];
            if( bmy < h->mb.mv_min_spel[1] + 3 ||
                bmy > h->mb.mv_max_spel[1] - 3 )
                break;
            dir = -2;
            omx = bmx;
            omy = bmy;
            for( int j = 0; j < 3; j++ )
            {
                COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1 );
                COST_MV_RD  ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1, odir-1+j );
            }
            if( dir == -2 )
                break;
        }
    }

    /* square refine, same pattern as ME HEX. */
    omx = bmx;
    omy = bmy;
    for( int i = 0; i < 8; i++ )
    {
        COST_MV_SATD( omx + square1[i+1][0], omy + square1[i+1][1], satd, 1 );
        COST_MV_RD  ( omx + square1[i+1][0], omy + square1[i+1][1], satd, 0, 0 );
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, pack16to32_mask(bmx, bmy) );
    amvd = pack8to16( X264_MIN(abs(bmx - m->mvp[0]),33), X264_MIN(abs(bmy - m->mvp[1]),33) );
    x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, amvd );
    h->mb.b_skip_mc = 0;
}