/*****************************************************************************
 * me.c: h264 encoder library (Motion Estimation)
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/
#include "common/common.h"

/* presets selected from good points on the speed-vs-quality curve of several test videos
 * subpel_iters[i_subpel_refine] = { refine_hpel, refine_qpel, me_hpel, me_qpel }
 * where me_* are the number of EPZS iterations run on all candidate block types,
 * and refine_* are run only on the winner.
 * the subme=8,9 values are much higher because any amount of satd search makes
 * up its time by reducing the number of qpel-rd iterations. */
static const int subpel_iterations[][4] =
   {{0,0,0,0},
    {1,1,0,0},
    {0,1,1,0},
    {0,2,1,0},
    {0,2,1,1},
    {0,2,1,2},
    {0,0,2,2},
    {0,0,2,2},
    {0,0,4,10},
    {0,0,4,10}};
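/* How the table is consumed by the two call sites below: x264_me_search_ref reads
 * columns 2 and 3 (me_hpel, me_qpel) to bound the subpel diamond applied to every
 * candidate, while x264_me_refine_qpel reads columns 0 and 1 (refine_hpel,
 * refine_qpel) for the extra refinement run only on the winning block type. */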
static const int mod6m1[8] = {5,0,1,2,3,4,5,0};
/* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
static const int hex2[8][2] = {{-1,-2}, {-2,0}, {-1,2}, {1,2}, {2,0}, {1,-2}, {-1,-2}, {-2,0}};
static const int square1[8][2] = {{0,-1}, {0,1}, {-1,0}, {1,0}, {-1,-1}, {1,1}, {-1,1}, {1,-1}};
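/* Indexing note (illustrative, derived from the tables above): hex2 is always read
 * as hex2[dir+1] with dir in -1..6, so the duplicated first/last entries make the
 * lookups valid without a modulo, e.g. dir==5 gives hex2[6] == hex2[0] == {-1,-2}.
 * Likewise mod6m1[d+1] == d mod 6 for d in -1..6; the table form
 *     odir = mod6m1[dir+1];
 * computes the same thing as
 *     odir = (dir + 6) % 6;
 * but without a division in the inner loop. */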

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel );

#define BITS_MVD( mx, my )\
    (p_cost_mvx[(mx)<<2] + p_cost_mvy[(my)<<2])

#define COST_MV( mx, my )\
{\
    int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE,\
                   &p_fref[(my)*m->i_stride[0]+(mx)], m->i_stride[0] )\
             + BITS_MVD(mx,my);\
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
}

#define COST_MV_HPEL( mx, my ) \
{ \
    int stride = 16; \
    uint8_t *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh ); \
    int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    COPY3_IF_LT( bpred_cost, cost, bpred_mx, mx, bpred_my, my ); \
}

#define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
{\
    uint8_t *pix_base = p_fref + bmx + bmy*m->i_stride[0];\
    h->pixf.fpelcmp_x3[i_pixel]( m->p_fenc[0],\
        pix_base + (m0x) + (m0y)*m->i_stride[0],\
        pix_base + (m1x) + (m1y)*m->i_stride[0],\
        pix_base + (m2x) + (m2y)*m->i_stride[0],\
        m->i_stride[0], costs );\
    (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
    (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
    (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
}

#define COST_MV_X4( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y )\
{\
    uint8_t *pix_base = p_fref + omx + omy*m->i_stride[0];\
    h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0],\
        pix_base + (m0x) + (m0y)*m->i_stride[0],\
        pix_base + (m1x) + (m1y)*m->i_stride[0],\
        pix_base + (m2x) + (m2y)*m->i_stride[0],\
        pix_base + (m3x) + (m3y)*m->i_stride[0],\
        m->i_stride[0], costs );\
    costs[0] += BITS_MVD( omx+(m0x), omy+(m0y) );\
    costs[1] += BITS_MVD( omx+(m1x), omy+(m1y) );\
    costs[2] += BITS_MVD( omx+(m2x), omy+(m2y) );\
    costs[3] += BITS_MVD( omx+(m3x), omy+(m3y) );\
    COPY3_IF_LT( bcost, costs[0], bmx, omx+(m0x), bmy, omy+(m0y) );\
    COPY3_IF_LT( bcost, costs[1], bmx, omx+(m1x), bmy, omy+(m1y) );\
    COPY3_IF_LT( bcost, costs[2], bmx, omx+(m2x), bmy, omy+(m2y) );\
    COPY3_IF_LT( bcost, costs[3], bmx, omx+(m3x), bmy, omy+(m3y) );\
}

#define COST_MV_X3_ABS( m0x, m0y, m1x, m1y, m2x, m2y )\
{\
    h->pixf.fpelcmp_x3[i_pixel]( m->p_fenc[0],\
        p_fref + (m0x) + (m0y)*m->i_stride[0],\
        p_fref + (m1x) + (m1y)*m->i_stride[0],\
        p_fref + (m2x) + (m2y)*m->i_stride[0],\
        m->i_stride[0], costs );\
    costs[0] += p_cost_mvx[(m0x)<<2]; /* no cost_mvy */\
    costs[1] += p_cost_mvx[(m1x)<<2];\
    costs[2] += p_cost_mvx[(m2x)<<2];\
    COPY3_IF_LT( bcost, costs[0], bmx, m0x, bmy, m0y );\
    COPY3_IF_LT( bcost, costs[1], bmx, m1x, bmy, m1y );\
    COPY3_IF_LT( bcost, costs[2], bmx, m2x, bmy, m2y );\
}

#define DIA1_ITER( mx, my )\
{\
    omx = mx; omy = my;\
    COST_MV_X4( 0,-1, 0,1, -1,0, 1,0 );\
}

#define CROSS( start, x_max, y_max )\
{\
    int i = start;\
    if( x_max <= X264_MIN(mv_x_max-omx, omx-mv_x_min) )\
        for( ; i < x_max-2; i+=4 )\
            COST_MV_X4( i,0, -i,0, i+2,0, -i-2,0 );\
    for( ; i < x_max; i+=2 )\
    {\
        if( omx+i <= mv_x_max )\
            COST_MV( omx+i, omy );\
        if( omx-i >= mv_x_min )\
            COST_MV( omx-i, omy );\
    }\
    i = start;\
    if( y_max <= X264_MIN(mv_y_max-omy, omy-mv_y_min) )\
        for( ; i < y_max-2; i+=4 )\
            COST_MV_X4( 0,i, 0,-i, 0,i+2, 0,-i-2 );\
    for( ; i < y_max; i+=2 )\
    {\
        if( omy+i <= mv_y_max )\
            COST_MV( omx, omy+i );\
        if( omy-i >= mv_y_min )\
            COST_MV( omx, omy-i );\
    }\
}

void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    int i_me_range = h->param.analyse.i_me_range;
    int bmx, bmy, bcost;
    int bpred_mx = 0, bpred_my = 0, bpred_cost = COST_MAX;
    int omx, omy, pmx, pmy;
    uint8_t *p_fref = m->p_fref[0];
    DECLARE_ALIGNED_16( uint8_t pix[16*16] );

    int i = 0, j;
    int dir;
    int costs[6];

    int mv_x_min = h->mb.mv_min_fpel[0];
    int mv_y_min = h->mb.mv_min_fpel[1];
    int mv_x_max = h->mb.mv_max_fpel[0];
    int mv_y_max = h->mb.mv_max_fpel[1];

#define CHECK_MVRANGE(mx,my) ( mx >= mv_x_min && mx <= mv_x_max && my >= mv_y_min && my <= mv_y_max )

    const int16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const int16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
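    /* p_cost_mv points at the center of a table of lambda*bits(mvd) indexed by
     * signed qpel displacement, so shifting it by -mvp turns an absolute (qpel)
     * position into the rate of its difference from the predictor. Worked
     * example: if mvp[0] == 6, then for fullpel mx == 2, p_cost_mvx[mx<<2] is
     * m->p_cost_mv[8-6], the cost of a +2 qpel mvd. */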
    bmx = x264_clip3( m->mvp[0], mv_x_min*4, mv_x_max*4 );
    bmy = x264_clip3( m->mvp[1], mv_y_min*4, mv_y_max*4 );
    pmx = ( bmx + 2 ) >> 2;
    pmy = ( bmy + 2 ) >> 2;
    bcost = COST_MAX;

    /* try extra predictors if provided */
    if( h->mb.i_subpel_refine >= 3 )
    {
        uint32_t bmv = pack16to32_mask(bmx,bmy);
        COST_MV_HPEL( bmx, bmy );
        do
        {
            if( *(uint32_t*)mvc[i] && (bmv - *(uint32_t*)mvc[i]) )
            {
                int mx = x264_clip3( mvc[i][0], mv_x_min*4, mv_x_max*4 );
                int my = x264_clip3( mvc[i][1], mv_y_min*4, mv_y_max*4 );
                COST_MV_HPEL( mx, my );
            }
        } while( ++i < i_mvc );
        bmx = ( bpred_mx + 2 ) >> 2;
        bmy = ( bpred_my + 2 ) >> 2;
        bcost = bpred_cost;
    }
    else
    {
        /* check the MVP */
        COST_MV( pmx, pmy );
        /* Because we are rounding the predicted motion vector to fullpel, there will be
         * an extra MV cost in 15 out of 16 cases. However, when the predicted MV is
         * chosen as the best predictor, it is often the case that the subpel search will
         * result in a vector at or next to the predicted motion vector. Therefore, it is
         * sensible to remove the cost of the MV from the rounded MVP to avoid unfairly
         * biasing against use of the predicted motion vector. */
        bcost -= BITS_MVD( pmx, pmy );
        do
        {
            int mx = (mvc[i][0] + 2) >> 2;
            int my = (mvc[i][1] + 2) >> 2;
            if( (mx | my) && ((mx-bmx) | (my-bmy)) )
            {
                mx = x264_clip3( mx, mv_x_min, mv_x_max );
                my = x264_clip3( my, mv_y_min, mv_y_max );
                COST_MV( mx, my );
            }
        } while( ++i < i_mvc );
    }
    COST_MV( 0, 0 );

    switch( h->mb.i_me_method )
    {
    case X264_ME_DIA:
        /* diamond search, radius 1 */
        i = 0;
        do
        {
            DIA1_ITER( bmx, bmy );
            if( (bmx == omx) & (bmy == omy) )
                break;
            if( !CHECK_MVRANGE(bmx, bmy) )
                break;
        } while( ++i < i_me_range );
        break;

    case X264_ME_HEX:
me_hex2:
        /* hexagon search, radius 2 */
#if 0
        for( i = 0; i < i_me_range/2; i++ )
        {
            omx = bmx; omy = bmy;
            COST_MV( omx-2, omy   );
            COST_MV( omx-1, omy+2 );
            COST_MV( omx+1, omy+2 );
            COST_MV( omx+2, omy   );
            COST_MV( omx+1, omy-2 );
            COST_MV( omx-1, omy-2 );
            if( bmx == omx && bmy == omy )
                break;
            if( !CHECK_MVRANGE(bmx, bmy) )
                break;
        }
#else
        /* equivalent to the above, but eliminates duplicate candidates */
        dir = -2;

        /* hexagon */
        COST_MV_X3_DIR( -2,0, -1, 2,  1, 2, costs   );
        COST_MV_X3_DIR(  2,0,  1,-2, -1,-2, costs+3 );
        COPY2_IF_LT( bcost, costs[0], dir, 0 );
        COPY2_IF_LT( bcost, costs[1], dir, 1 );
        COPY2_IF_LT( bcost, costs[2], dir, 2 );
        COPY2_IF_LT( bcost, costs[3], dir, 3 );
        COPY2_IF_LT( bcost, costs[4], dir, 4 );
        COPY2_IF_LT( bcost, costs[5], dir, 5 );
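        /* dir now holds the index of the best of the 6 starting directions, or -2
         * if the center was best; each later iteration retests only the 3 points
         * of the half hexagon facing the last move, so no point is scored twice. */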
        if( dir != -2 )
        {
            bmx += hex2[dir+1][0];
            bmy += hex2[dir+1][1];
            /* half hexagon, not overlapping the previous iteration */
            for( i = 1; i < i_me_range/2 && CHECK_MVRANGE(bmx, bmy); i++ )
            {
                const int odir = mod6m1[dir+1];
                COST_MV_X3_DIR( hex2[odir+0][0], hex2[odir+0][1],
                                hex2[odir+1][0], hex2[odir+1][1],
                                hex2[odir+2][0], hex2[odir+2][1],
                                costs );
                dir = -2;
                COPY2_IF_LT( bcost, costs[0], dir, odir-1 );
                COPY2_IF_LT( bcost, costs[1], dir, odir   );
                COPY2_IF_LT( bcost, costs[2], dir, odir+1 );
                if( dir == -2 )
                    break;
                bmx += hex2[dir+1][0];
                bmy += hex2[dir+1][1];
            }
        }
#endif

        /* square refine */
        omx = bmx; omy = bmy;
        COST_MV_X4(  0,-1,  0,1, -1,0, 1,0 );
        COST_MV_X4( -1,-1, -1,1, 1,-1, 1,1 );
        break;

    case X264_ME_UMH:
        {
            /* Uneven-cross Multi-Hexagon-grid Search
             * as in JM, except with different early termination */

            static const int x264_pixel_size_shift[7] = { 0, 1, 1, 2, 3, 3, 4 };

            int ucost1, ucost2;
            int cross_start = 1;

            /* refine predictors */
            ucost1 = bcost;
            DIA1_ITER( pmx, pmy );
            if( pmx | pmy )
                DIA1_ITER( 0, 0 );

            if( i_pixel == PIXEL_4x4 )
                goto me_hex2;

            ucost2 = bcost;
            if( (bmx | bmy) && ((bmx-pmx) | (bmy-pmy)) )
                DIA1_ITER( bmx, bmy );
            if( bcost == ucost2 )
                cross_start = 3;
            omx = bmx; omy = bmy;
            /* early termination */
#define SAD_THRESH(v) ( bcost < ( v >> x264_pixel_size_shift[i_pixel] ) )
            if( bcost == ucost2 && SAD_THRESH(2000) )
            {
                COST_MV_X4( 0,-2, -1,-1, 1,-1, -2,0 );
                COST_MV_X4( 2, 0, -1, 1, 1, 1,  0,2 );
                if( bcost == ucost1 && SAD_THRESH(500) )
                    break;
                if( bcost == ucost2 )
                {
                    int range = (i_me_range>>1) | 1;
                    CROSS( 3, range, range );
                    COST_MV_X4( -1,-2, 1,-2, -2,-1, 2,-1 );
                    COST_MV_X4( -2, 1, 2, 1, -1, 2, 1, 2 );
                    if( bcost == ucost2 )
                        break;
                    cross_start = range + 2;
                }
            }
            /* adaptive search range */
            if( i_mvc )
            {
                /* range multipliers based on casual inspection of some statistics of
                 * average distance between current predictor and final mv found by ESA.
                 * these have not been tuned much by actual encoding. */
                static const int range_mul[4][4] =
                {
                    { 3, 3, 4, 4 },
                    { 3, 4, 4, 4 },
                    { 4, 4, 4, 5 },
                    { 4, 4, 5, 6 },
                };
                int mvd;
                int sad_ctx, mvd_ctx;
                int denom = 1;

                if( i_mvc == 1 )
                {
                    if( i_pixel == PIXEL_16x16 )
                        /* mvc is probably the same as mvp, so the difference isn't meaningful.
                         * but prediction usually isn't too bad, so just use medium range */
                        mvd = 25;
                    else
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                }
                else
                {
                    /* calculate the degree of agreement between predictors. */
                    /* in 16x16, mvc includes all the neighbors used to make mvp,
                     * so don't count mvp separately. */
                    denom = i_mvc - 1;
                    mvd = 0;
                    if( i_pixel != PIXEL_16x16 )
                    {
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                        denom++;
                    }
                    mvd += x264_predictor_difference( mvc, i_mvc );
                }

                sad_ctx = SAD_THRESH(1000) ? 0
                        : SAD_THRESH(2000) ? 1
                        : SAD_THRESH(4000) ? 2 : 3;
                mvd_ctx = mvd < 10*denom ? 0
                        : mvd < 20*denom ? 1
                        : mvd < 40*denom ? 2 : 3;

                i_me_range = i_me_range * range_mul[mvd_ctx][sad_ctx] / 4;
            }
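            /* Worked example with the multipliers above: a tiny SAD and tightly
             * clustered predictors (sad_ctx == mvd_ctx == 0) scale the range by
             * range_mul[0][0]/4 == 3/4, while a large SAD and scattered
             * predictors (both == 3) scale it by 6/4. */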
            /* FIXME if the above DIA2/OCT2/CROSS found a new mv, it has not updated omx/omy.
             * we are still centered on the same place as the DIA2. is this desirable? */
            CROSS( cross_start, i_me_range, i_me_range/2 );

            COST_MV_X4( -2,-2, -2,2, 2,-2, 2,2 );

            /* hexagon grid */
            omx = bmx; omy = bmy;
            i = 1;
            do
            {
                static const int hex4[16][2] = {
                    {-4, 2}, {-4, 1}, {-4, 0}, {-4,-1}, {-4,-2},
                    { 4,-2}, { 4,-1}, { 4, 0}, { 4, 1}, { 4, 2},
                    { 2, 3}, { 0, 4}, {-2, 3},
                    {-2,-3}, { 0,-4}, { 2,-3},
                };
                if( 4*i > X264_MIN4( mv_x_max-omx, omx-mv_x_min,
                                     mv_y_max-omy, omy-mv_y_min ) )
                {
                    for( j = 0; j < 16; j++ )
                    {
                        int mx = omx + hex4[j][0]*i;
                        int my = omy + hex4[j][1]*i;
                        if( CHECK_MVRANGE(mx, my) )
                            COST_MV( mx, my );
                    }
                }
                else
                {
                    COST_MV_X4( -4*i, 2*i, -4*i, 1*i, -4*i, 0*i, -4*i,-1*i );
                    COST_MV_X4( -4*i,-2*i,  4*i,-2*i,  4*i,-1*i,  4*i, 0*i );
                    COST_MV_X4(  4*i, 1*i,  4*i, 2*i,  2*i, 3*i,  0*i, 4*i );
                    COST_MV_X4( -2*i, 3*i, -2*i,-3*i,  0*i,-4*i,  2*i,-3*i );
                }
            } while( ++i <= i_me_range/4 );
            if( bmy <= mv_y_max )
                goto me_hex2;
            break;
        }

    case X264_ME_ESA:
    case X264_ME_TESA:
        {
            const int min_x = X264_MAX( bmx - i_me_range, mv_x_min );
            const int min_y = X264_MAX( bmy - i_me_range, mv_y_min );
            const int max_x = X264_MIN( bmx + i_me_range, mv_x_max );
            const int max_y = X264_MIN( bmy + i_me_range, mv_y_max );
            /* SEA is fastest in multiples of 4 */
            const int width = (max_x - min_x + 3) & ~3;
            int my;
#if 0
            /* plain old exhaustive search */
            int mx;
            for( my = min_y; my <= max_y; my++ )
                for( mx = min_x; mx <= max_x; mx++ )
                    COST_MV( mx, my );
#else
            /* successive elimination by comparing DC before a full SAD,
             * because sum(abs(diff)) >= abs(diff(sum)). */
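            /* The bound is the triangle inequality applied per block:
             *     sum_i |a_i - b_i| >= |sum_i (a_i - b_i)| = |DC(a) - DC(b)|
             * so a candidate whose DC difference, read in O(1) from the
             * precomputed block sums (m->integral), already exceeds the current
             * best SAD can be rejected without ever loading its pixels. */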
            const int stride = m->i_stride[0];
            uint16_t *sums_base = m->integral;
            /* due to a GCC bug on some platforms (win32?), zero[] may not actually be aligned.
             * unlike the similar case in ratecontrol.c, this is not a problem because it is
             * not used for any SSE instructions and the only loss is a tiny bit of performance. */
            DECLARE_ALIGNED_16( static uint8_t zero[8*FENC_STRIDE] );
            DECLARE_ALIGNED_16( int enc_dc[4] );
            int sad_size = i_pixel <= PIXEL_8x8 ? PIXEL_8x8 : PIXEL_4x4;
            int delta = x264_pixel_size[sad_size].w;
            int16_t xs_buf[64];
            int16_t *xs = width<=64 ? xs_buf : x264_malloc( (width+15)*sizeof(int16_t) );
            int xn;
            uint16_t *cost_fpel_mvx = x264_cost_mv_fpel[h->mb.i_qp][-m->mvp[0]&3] + (-m->mvp[0]>>2);
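            /* cost_fpel_mvx is the fullpel slice of the qpel mv-cost table: the
             * mvd of fullpel mx against qpel mvp[0] is 4*mx - mvp[0], whose
             * fractional part (-mvp[0])&3 is constant across the row, so one of
             * four precomputed fullpel tables is selected and then offset by the
             * integer part (-mvp[0])>>2, making cost_fpel_mvx[mx] the exact cost. */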
            h->pixf.sad_x4[sad_size]( zero, m->p_fenc[0], m->p_fenc[0]+delta,
                m->p_fenc[0]+delta*FENC_STRIDE, m->p_fenc[0]+delta+delta*FENC_STRIDE,
                FENC_STRIDE, enc_dc );
            if( delta == 4 )
                sums_base += stride * (h->fenc->i_lines[0] + PADV*2);
            if( i_pixel == PIXEL_16x16 || i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                delta *= stride;
            if( i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                enc_dc[1] = enc_dc[2];

            if( h->mb.i_me_method == X264_ME_TESA )
            {
                // ADS threshold, then SAD threshold, then keep the best few SADs, then SATD
                typedef struct {
                    int sad;
                    int16_t mx, my;
                } mvsad_t;
                mvsad_t *mvsads = x264_malloc( width*(max_y-min_y+1)*sizeof(mvsad_t) );
                int nmvsad = 0, limit;
                int sad_thresh = i_me_range <= 16 ? 10 : i_me_range <= 24 ? 11 : 12;
                int bsad = h->pixf.sad[i_pixel]( m->p_fenc[0], FENC_STRIDE, p_fref+bmy*stride+bmx, stride )
                         + BITS_MVD( bmx, bmy );
                for( my = min_y; my <= max_y; my++ )
                {
                    int ycost = p_cost_mvy[my<<2];
                    if( bsad <= ycost )
                        continue;
                    bsad -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bsad*17/16 );
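                    /* the 17/16 fudge factor widens the ADS cutoff slightly past
                     * the best SAD so far, keeping near-ties alive for the SATD
                     * reranking below instead of pruning them on an approximation. */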
                    for( i=0; i<xn-2; i+=3 )
                    {
                        uint8_t *ref = p_fref+min_x+my*stride;
                        int sads[3];
                        h->pixf.sad_x3[i_pixel]( m->p_fenc[0], ref+xs[i], ref+xs[i+1], ref+xs[i+2], stride, sads );
                        for( j=0; j<3; j++ )
                        {
                            int sad = sads[j] + cost_fpel_mvx[xs[i+j]];
                            if( sad < bsad*sad_thresh>>3 )
                            {
                                COPY1_IF_LT( bsad, sad );
                                mvsads[nmvsad].sad = sad + ycost;
                                mvsads[nmvsad].mx = min_x+xs[i+j];
                                mvsads[nmvsad].my = my;
                                nmvsad++;
                            }
                        }
                    }
                    for( ; i<xn; i++ )
                    {
                        int mx = min_x+xs[i];
                        int sad = h->pixf.sad[i_pixel]( m->p_fenc[0], FENC_STRIDE, p_fref+mx+my*stride, stride )
                                + cost_fpel_mvx[xs[i]];
                        if( sad < bsad*sad_thresh>>3 )
                        {
                            COPY1_IF_LT( bsad, sad );
                            mvsads[nmvsad].sad = sad + ycost;
                            mvsads[nmvsad].mx = mx;
                            mvsads[nmvsad].my = my;
                            nmvsad++;
                        }
                    }
                    bsad += ycost;
                }

                limit = i_me_range / 2;
                if( nmvsad > limit*2 )
                {
                    // halve the range if the domain is too large... eh, close enough
                    bsad = bsad*(sad_thresh+8)>>4;
                    for( i=0; i<nmvsad && mvsads[i].sad <= bsad; i++ );
                    for( j=i; j<nmvsad; j++ )
                        if( mvsads[j].sad <= bsad )
                        {
                            /* mvsad_t is not guaranteed to be 8 bytes on all archs, so check before using explicit write-combining */
                            if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
                                *(uint64_t*)&mvsads[i++] = *(uint64_t*)&mvsads[j];
                            else
                                mvsads[i++] = mvsads[j];
                        }
                    nmvsad = i;
                }
                if( nmvsad > limit )
                {
                    for( i=0; i<limit; i++ )
                    {
                        int bj = i;
                        int bsad = mvsads[bj].sad;
                        for( j=i+1; j<nmvsad; j++ )
                            COPY2_IF_LT( bsad, mvsads[j].sad, bj, j );
                        if( bj > i )
                        {
                            if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
                                XCHG( uint64_t, *(uint64_t*)&mvsads[i], *(uint64_t*)&mvsads[bj] );
                            else
                                XCHG( mvsad_t, mvsads[i], mvsads[bj] );
                        }
                    }
                    nmvsad = limit;
                }
                for( i=0; i<nmvsad; i++ )
                    COST_MV( mvsads[i].mx, mvsads[i].my );
                x264_free( mvsads );
            }
            else
            {
                for( my = min_y; my <= max_y; my++ )
                {
                    int ycost = p_cost_mvy[my<<2];
                    if( bcost <= ycost )
                        continue;
                    bcost -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bcost );
                    for( i=0; i<xn-2; i+=3 )
                        COST_MV_X3_ABS( min_x+xs[i],my, min_x+xs[i+1],my, min_x+xs[i+2],my );
                    for( ; i<xn; i++ )
                        COST_MV( min_x+xs[i], my );
                    bcost += ycost;
                }
            }

            if( xs != xs_buf )
                x264_free( xs );
#endif
        }
        break;
    }

    if( bpred_cost < bcost )
    {
        m->mv[0] = bpred_mx;
        m->mv[1] = bpred_my;
        m->cost = bpred_cost;
    }
    else
    {
        m->mv[0] = bmx << 2;
        m->mv[1] = bmy << 2;
        m->cost = bcost;
    }

    /* compute the real cost */
    m->cost_mv = p_cost_mvx[ m->mv[0] ] + p_cost_mvy[ m->mv[1] ];
    if( bmx == pmx && bmy == pmy && h->mb.i_subpel_refine < 3 )
        m->cost += m->cost_mv;
    /* subpel refine */
    if( h->mb.i_subpel_refine >= 2 )
    {
        int hpel = subpel_iterations[h->mb.i_subpel_refine][2];
        int qpel = subpel_iterations[h->mb.i_subpel_refine][3];
        refine_subpel( h, m, hpel, qpel, p_halfpel_thresh, 0 );
    }
    else if( m->mv[1] > h->mb.mv_max_spel[1] )
        m->mv[1] = h->mb.mv_max_spel[1];
}

void x264_me_refine_qpel( x264_t *h, x264_me_t *m )
{
    int hpel = subpel_iterations[h->mb.i_subpel_refine][0];
    int qpel = subpel_iterations[h->mb.i_subpel_refine][1];

    if( m->i_pixel <= PIXEL_8x8 && h->sh.i_type == SLICE_TYPE_P )
        m->cost -= m->i_ref_cost;

    refine_subpel( h, m, hpel, qpel, NULL, 1 );
}

#define COST_MV_SAD( mx, my ) \
{ \
    int stride = 16; \
    uint8_t *src = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh ); \
    int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my ); \
}

#define COST_MV_SATD( mx, my, dir ) \
if( b_refine_qpel || (dir^1) != odir ) \
{ \
    int stride = 16; \
    uint8_t *src = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh ); \
    int cost = h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    if( b_chroma_me && cost < bcost ) \
    { \
        h->mc.mc_chroma( pix[0], 8, m->p_fref[4], m->i_stride[1], mx, my, bw/2, bh/2 ); \
        cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[1], FENC_STRIDE, pix[0], 8 ); \
        if( cost < bcost ) \
        { \
            h->mc.mc_chroma( pix[0], 8, m->p_fref[5], m->i_stride[1], mx, my, bw/2, bh/2 ); \
            cost += h->pixf.mbcmp[i_pixel+3]( m->p_fenc[2], FENC_STRIDE, pix[0], 8 ); \
        } \
    } \
    if( cost < bcost ) \
    { \
        bcost = cost; \
        bmx = mx; \
        bmy = my; \
        bdir = dir; \
    } \
}

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const int16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
    const int i_pixel = m->i_pixel;
    const int b_chroma_me = h->mb.b_chroma_me && i_pixel <= PIXEL_8x8;

    DECLARE_ALIGNED_16( uint8_t pix[2][32*18] ); // really 17x17, but round up for alignment
    int omx, omy;
    int i;

    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int bcost = m->cost;
    int odir = -1, bdir;

    /* try the subpel component of the predicted mv */
    if( hpel_iters && h->mb.i_subpel_refine < 3 )
    {
        int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
        int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1], h->mb.mv_max_spel[1] );
        if( (mx-bmx)|(my-bmy) )
            COST_MV_SAD( mx, my );
    }

716 /* halfpel diamond search */
717 for( i = hpel_iters; i > 0; i-- )
719 int omx = bmx, omy = bmy;
721 int stride = 32; // candidates are either all hpel or all qpel, so one stride is enough
722 uint8_t *src0, *src1, *src2, *src3;
723 src0 = h->mc.get_ref( pix[0], &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1 );
724 src2 = h->mc.get_ref( pix[1], &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh );
725 src1 = src0 + stride;
727 h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
728 COPY2_IF_LT( bcost, costs[0] + p_cost_mvx[omx ] + p_cost_mvy[omy-2], bmy, omy-2 );
729 COPY2_IF_LT( bcost, costs[1] + p_cost_mvx[omx ] + p_cost_mvy[omy+2], bmy, omy+2 );
730 COPY3_IF_LT( bcost, costs[2] + p_cost_mvx[omx-2] + p_cost_mvy[omy ], bmx, omx-2, bmy, omy );
731 COPY3_IF_LT( bcost, costs[3] + p_cost_mvx[omx+2] + p_cost_mvy[omy ], bmx, omx+2, bmy, omy );
732 if( (bmx == omx) & (bmy == omy) )
738 /* check for mvrange */
739 if( bmy > h->mb.mv_max_spel[1] )
740 bmy = h->mb.mv_max_spel[1];
742 COST_MV_SATD( bmx, bmy, -1 );
    /* early termination when examining multiple reference frames */
    if( p_halfpel_thresh )
    {
        if( (bcost*7)>>3 > *p_halfpel_thresh )
        {
            m->cost = bcost;
            m->mv[0] = bmx;
            m->mv[1] = bmy;
            // don't need cost_mv
            return;
        }
        else if( bcost < *p_halfpel_thresh )
            *p_halfpel_thresh = bcost;
    }

    /* quarterpel diamond search */
    bdir = -1;
    for( i = qpel_iters; i > 0; i-- )
    {
        odir = bdir;
        omx = bmx;
        omy = bmy;
        COST_MV_SATD( omx, omy - 1, 0 );
        COST_MV_SATD( omx, omy + 1, 1 );
        COST_MV_SATD( omx - 1, omy, 2 );
        COST_MV_SATD( omx + 1, omy, 3 );
        if( bmx == omx && bmy == omy )
            break;
    }

    /* check for mvrange */
    if( bmy > h->mb.mv_max_spel[1] )
    {
        bmy = h->mb.mv_max_spel[1];
        bcost = COST_MAX;
        COST_MV_SATD( bmx, bmy, -1 );
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    m->cost_mv = p_cost_mvx[ bmx ] + p_cost_mvy[ bmy ];
}

#define BIME_CACHE( dx, dy ) \
{ \
    int i = 4 + 3*dx + dy; \
    stride0[i] = bw; \
    stride1[i] = bw; \
    src0[i] = h->mc.get_ref( pix0[i], &stride0[i], m0->p_fref, m0->i_stride[0], om0x+dx, om0y+dy, bw, bh ); \
    src1[i] = h->mc.get_ref( pix1[i], &stride1[i], m1->p_fref, m1->i_stride[0], om1x+dx, om1y+dy, bw, bh ); \
}

#define BIME_CACHE2(a,b) \
    BIME_CACHE(a,b) \
    BIME_CACHE(-(a),-(b))

#define SATD_THRESH 17/16
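/* SATD_THRESH is written as a bare 17/16 so that "cost < bcost * SATD_THRESH"
 * evaluates as (bcost*17)/16 in integer math: the expensive RD costing below is
 * only spent on candidates whose SATD is within ~6% of the best seen so far. */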

#define COST_BIMV_SATD( m0x, m0y, m1x, m1y ) \
if( pass == 0 || !((visited[(m0x)&7][(m0y)&7][(m1x)&7] & (1<<((m1y)&7)))) ) \
{ \
    int cost; \
    int i0 = 4 + 3*(m0x-om0x) + (m0y-om0y); \
    int i1 = 4 + 3*(m1x-om1x) + (m1y-om1y); \
    visited[(m0x)&7][(m0y)&7][(m1x)&7] |= (1<<((m1y)&7));\
    h->mc.avg[i_pixel]( pix, bw, src0[i0], stride0[i0], src1[i1], stride1[i1], i_weight ); \
    cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, bw ) \
         + p_cost_m0x[ m0x ] + p_cost_m0y[ m0y ] \
         + p_cost_m1x[ m1x ] + p_cost_m1y[ m1y ]; \
    if( rd ) \
    { \
        if( cost < bcost * SATD_THRESH ) \
        { \
            uint64_t costrd; \
            *(uint32_t*)cache0_mv = *(uint32_t*)cache0_mv2 = pack16to32_mask(m0x,m0y); \
            *(uint32_t*)cache1_mv = *(uint32_t*)cache1_mv2 = pack16to32_mask(m1x,m1y); \
            costrd = x264_rd_cost_part( h, i_lambda2, i8, m0->i_pixel ); \
            if( costrd < bcostrd ) \
            { \
                bcostrd = costrd; \
                bm0x = m0x; \
                bm0y = m0y; \
                bm1x = m1x; \
                bm1y = m1y; \
            } \
        } \
    } \
    else if( cost < bcost ) \
    { \
        bcost = cost; \
        bm0x = m0x; \
        bm0y = m0y; \
        bm1x = m1x; \
        bm1y = m1y; \
    } \
}

#define CHECK_BIDIR(a,b,c,d) \
    COST_BIMV_SATD(om0x+a, om0y+b, om1x+c, om1y+d)

#define CHECK_BIDIR2(a,b,c,d) \
    CHECK_BIDIR(a,b,c,d) \
    CHECK_BIDIR(-(a),-(b),-(c),-(d))

#define CHECK_BIDIR8(a,b,c,d) \
    CHECK_BIDIR2(a,b,c,d) \
    CHECK_BIDIR2(b,c,d,a) \
    CHECK_BIDIR2(c,d,a,b) \
    CHECK_BIDIR2(d,a,b,c)

static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2, int rd )
{
    static const int pixel_mv_offs[] = { 0, 4, 4*8, 0 };
    int16_t *cache0_mv = h->mb.cache.mv[0][x264_scan8[i8*4]];
    int16_t *cache0_mv2 = cache0_mv + pixel_mv_offs[m0->i_pixel];
    int16_t *cache1_mv = h->mb.cache.mv[1][x264_scan8[i8*4]];
    int16_t *cache1_mv2 = cache1_mv + pixel_mv_offs[m0->i_pixel];
    const int i_pixel = m0->i_pixel;
    const int bw = x264_pixel_size[i_pixel].w;
    const int bh = x264_pixel_size[i_pixel].h;
    const int16_t *p_cost_m0x = m0->p_cost_mv - x264_clip3( m0->mvp[0], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
    const int16_t *p_cost_m0y = m0->p_cost_mv - x264_clip3( m0->mvp[1], h->mb.mv_min_spel[1], h->mb.mv_max_spel[1] );
    const int16_t *p_cost_m1x = m1->p_cost_mv - x264_clip3( m1->mvp[0], h->mb.mv_min_spel[0], h->mb.mv_max_spel[0] );
    const int16_t *p_cost_m1y = m1->p_cost_mv - x264_clip3( m1->mvp[1], h->mb.mv_min_spel[1], h->mb.mv_max_spel[1] );
    DECLARE_ALIGNED_16( uint8_t pix0[9][16*16] );
    DECLARE_ALIGNED_16( uint8_t pix1[9][16*16] );
    DECLARE_ALIGNED_16( uint8_t pix[16*16] );
    uint8_t *src0[9];
    uint8_t *src1[9];
    int stride0[9];
    int stride1[9];
    int bm0x = m0->mv[0], om0x = bm0x;
    int bm0y = m0->mv[1], om0y = bm0y;
    int bm1x = m1->mv[0], om1x = bm1x;
    int bm1y = m1->mv[1], om1y = bm1y;
    int bcost = COST_MAX;
    int pass = 0;
    uint64_t bcostrd = COST_MAX64;

    /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
    DECLARE_ALIGNED_16( uint8_t visited[8][8][8] );
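    /* (m0x, m0y, m1x) are each hashed to their low 3 bits to pick a byte, and
     * bit (m1y&7) inside it marks the pair as tried. Distinct pairs can alias
     * mod 8, but since a set bit only suppresses a re-test, a collision merely
     * skips one candidate and never corrupts the search. */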

    if( bm0y > h->mb.mv_max_spel[1] - 8 ||
        bm1y > h->mb.mv_max_spel[1] - 8 )
        return;

    h->mc.memzero_aligned( visited, sizeof(visited) );

    BIME_CACHE( 0, 0 );
    CHECK_BIDIR( 0, 0, 0, 0 );
    for( pass = 0; pass < 8; pass++ )
    {
        /* check all mv pairs that differ in at most 2 components from the current mvs. */
        /* doesn't do chroma ME. this probably doesn't matter, as the gains
         * from bidir ME are the same with and without chroma ME. */

        BIME_CACHE2( 1, 0 );
        BIME_CACHE2( 0, 1 );
        BIME_CACHE2( 1, 1 );
        BIME_CACHE2( 1,-1 );

        CHECK_BIDIR8( 0, 0, 0, 1 );
        CHECK_BIDIR8( 0, 0, 1, 1 );
        CHECK_BIDIR2( 0, 1, 0, 1 );
        CHECK_BIDIR2( 1, 0, 1, 0 );
        CHECK_BIDIR8( 0, 0,-1, 1 );
        CHECK_BIDIR2( 0,-1, 0, 1 );
        CHECK_BIDIR2(-1, 0, 1, 0 );

        if( om0x == bm0x && om0y == bm0y && om1x == bm1x && om1y == bm1y )
            break;

        om0x = bm0x;
        om0y = bm0y;
        om1x = bm1x;
        om1y = bm1y;
        BIME_CACHE( 0, 0 );
    }

    m0->mv[0] = bm0x;
    m0->mv[1] = bm0y;
    m1->mv[0] = bm1x;
    m1->mv[1] = bm1y;
}

void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
{
    x264_me_refine_bidir( h, m0, m1, i_weight, 0, 0, 0 );
}

void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 )
{
    x264_me_refine_bidir( h, m0, m1, i_weight, i8, i_lambda2, 1 );
}

#undef COST_MV_SATD
#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
{ \
    if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
    { \
        int stride = 16; \
        uint8_t *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw*4, bh*4 ); \
        dst = h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
            + p_cost_mvx[mx] + p_cost_mvy[my]; \
        COPY1_IF_LT( bsatd, dst ); \
    } \
    else \
        dst = COST_MAX; \
}

#define COST_MV_RD( mx, my, satd, do_dir, mdir ) \
{ \
    if( satd <= bsatd * SATD_THRESH ) \
    { \
        uint64_t cost; \
        *(uint32_t*)cache_mv = *(uint32_t*)cache_mv2 = pack16to32_mask(mx,my); \
        cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
        COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
    } \
}
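
/* The two macros form a prefilter pair: COST_MV_SATD scores a candidate with a
 * cheap SATD and tracks the best in bsatd, then COST_MV_RD runs the full
 * rate-distortion cost (a trial encode via x264_rd_cost_part) only when the
 * SATD is within SATD_THRESH of that best, since exact RD costing is far too
 * slow to run on every position. */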

void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
{
    // don't have to fill the whole mv cache rectangle
    static const int pixel_mv_offs[] = { 0, 4, 4*8, 0, 2, 2*8, 0 };
    int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
    int16_t *cache_mv2 = cache_mv + pixel_mv_offs[m->i_pixel];
    const int16_t *p_cost_mvx, *p_cost_mvy;
    const int bw = x264_pixel_size[m->i_pixel].w>>2;
    const int bh = x264_pixel_size[m->i_pixel].h>>2;
    const int i_pixel = m->i_pixel;

    DECLARE_ALIGNED_16( uint8_t pix[16*16] );
    uint64_t bcost = m->i_pixel == PIXEL_16x16 ? m->cost : COST_MAX64;
    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int omx, omy, pmx, pmy, i, j;
    unsigned bsatd;
    int satd = 0;
    int dir = -2;
    int satds[8];

    if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
        x264_mb_predict_mv( h, i_list, i4, bw, m->mvp );
    pmx = m->mvp[0];
    pmy = m->mvp[1];
    p_cost_mvx = m->p_cost_mv - pmx;
    p_cost_mvy = m->p_cost_mv - pmy;
    COST_MV_SATD( bmx, bmy, bsatd, 0 );
    COST_MV_RD( bmx, bmy, 0, 0, 0 );
    /* check the predicted mv */
    if( (bmx != pmx || bmy != pmy)
        && pmx >= h->mb.mv_min_spel[0] && pmx <= h->mb.mv_max_spel[0]
        && pmy >= h->mb.mv_min_spel[1] && pmy <= h->mb.mv_max_spel[1] )
    {
        COST_MV_SATD( pmx, pmy, satd, 0 );
        COST_MV_RD( pmx, pmy, satd, 0, 0 );
        /* The hex motion search is guaranteed to not repeat the center candidate,
         * so if pmv is chosen, set the "MV to avoid checking" to bmv instead. */
        if( bmx == pmx && bmy == pmy )
        {
            pmx = m->mv[0];
            pmy = m->mv[1];
        }
    }

    /* subpel hex search, same pattern as ME HEX. */
    dir = -2;
    omx = bmx;
    omy = bmy;
    for( j=0; j<6; j++ ) COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satds[j], 1 );
    for( j=0; j<6; j++ ) COST_MV_RD  ( omx + hex2[j+1][0], omy + hex2[j+1][1], satds[j], 1, j );

    if( dir != -2 )
    {
        /* half hexagon, not overlapping the previous iteration */
        for( i = 1; i < 10; i++ )
        {
            const int odir = mod6m1[dir+1];
            if( bmy > h->mb.mv_max_spel[1] - 2 ||
                bmy < h->mb.mv_min_spel[1] + 2 )
                break;
            dir = -2;
            omx = bmx;
            omy = bmy;
            for( j=0; j<3; j++ ) COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satds[j], 1 );
            for( j=0; j<3; j++ ) COST_MV_RD  ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satds[j], 1, odir-1+j );
            if( dir == -2 )
                break;
        }
    }

    /* square refine, same pattern as ME HEX. */
    omx = bmx;
    omy = bmy;
    for( i=0; i<8; i++ ) COST_MV_SATD( omx + square1[i][0], omy + square1[i][1], satds[i], 1 );
    for( i=0; i<8; i++ ) COST_MV_RD  ( omx + square1[i][0], omy + square1[i][1], satds[i], 0, 0 );

    bmy = x264_clip3( bmy, h->mb.mv_min_spel[1], h->mb.mv_max_spel[1] );

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw, bh, i_list, pack16to32_mask(bmx, bmy) );
    x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw, bh, i_list, pack16to32_mask(bmx - m->mvp[0], bmy - m->mvp[1]) );
}