/*****************************************************************************
 * mc.c: motion compensation
 *****************************************************************************
 * Copyright (C) 2003-2010 x264 project
 *
 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
 *          Loren Merritt <lorenm@u.washington.edu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/
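
#include "common.h"

/* platform-specific MC headers for the SIMD initializers called from
 * x264_mc_init() below; the guard macros are assumed from the build config */
#if HAVE_MMX
#include "x86/mc.h"
#endif
#if ARCH_PPC
#include "ppc/mc.h"
#endif
#if ARCH_ARM
#include "arm/mc.h"
#endif
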
/* Rounded average of two predictions; used for qpel interpolation and bipred. */
static inline void pixel_avg( pixel *dst,  int i_dst_stride,
                              pixel *src1, int i_src1_stride,
                              pixel *src2, int i_src2_stride,
                              int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        dst  += i_dst_stride;
        src1 += i_src1_stride;
        src2 += i_src2_stride;
    }
}

static inline void pixel_avg_wxh( pixel *dst, int i_dst, pixel *src1, int i_src1, pixel *src2, int i_src2, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        for( int x = 0; x < width; x++ )
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        src1 += i_src1;
        src2 += i_src2;
        dst  += i_dst;
    }
}

/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
static inline void pixel_avg_weight_wxh( pixel *dst, int i_dst, pixel *src1, int i_src1, pixel *src2, int i_src2, int width, int height, int i_weight1 )
{
    const int i_weight2 = 64 - i_weight1;
    for( int y = 0; y < height; y++, dst += i_dst, src1 += i_src1, src2 += i_src2 )
        for( int x = 0; x < width; x++ )
            dst[x] = x264_clip_pixel( (src1[x]*i_weight1 + src2[x]*i_weight2 + (1<<5)) >> 6 );
}

#define PIXEL_AVG_C( name, width, height ) \
static void name( pixel *pix1, int i_stride_pix1, \
                  pixel *pix2, int i_stride_pix2, \
                  pixel *pix3, int i_stride_pix3, int weight ) \
{ \
    if( weight == 32 ) \
        pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height ); \
    else \
        pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, pix3, i_stride_pix3, width, height, weight ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )

static void x264_weight_cache( x264_t *h, x264_weight_t *w )
{
    w->weightfn = h->mc.weight;
}

#define opscale(x) dst[x] = x264_clip_pixel( ((src[x] * scale + (1<<(denom - 1))) >> denom) + offset )
#define opscale_noden(x) dst[x] = x264_clip_pixel( src[x] * scale + offset )
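/* H.264 explicit weighted prediction:
 * dst = clip( ((src * scale + (1 << (denom-1))) >> denom) + offset ).
 * When denom is 0 the shift and rounding term vanish, hence the separate
 * opscale_noden path; the offset is pre-scaled for bit depths above 8. */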
static void mc_weight( pixel *dst, int i_dst_stride, pixel *src, int i_src_stride, const x264_weight_t *weight, int i_width, int i_height )
{
    int offset = weight->i_offset << (BIT_DEPTH-8);
    int scale = weight->i_scale;
    int denom = weight->i_denom;
    if( denom >= 1 )
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale( x );
    }
    else
    {
        for( int y = 0; y < i_height; y++, dst += i_dst_stride, src += i_src_stride )
            for( int x = 0; x < i_width; x++ )
                opscale_noden( x );
    }
}

#define MC_WEIGHT_C( name, width ) \
    static void name( pixel *dst, int i_dst_stride, pixel *src, int i_src_stride, const x264_weight_t *weight, int height ) \
{ \
    mc_weight( dst, i_dst_stride, src, i_src_stride, weight, width, height );\
}

MC_WEIGHT_C( mc_weight_w20, 20 )
MC_WEIGHT_C( mc_weight_w16, 16 )
MC_WEIGHT_C( mc_weight_w12, 12 )
MC_WEIGHT_C( mc_weight_w8,   8 )
MC_WEIGHT_C( mc_weight_w4,   4 )
MC_WEIGHT_C( mc_weight_w2,   2 )

static weight_fn_t x264_mc_weight_wtab[6] =
{
    mc_weight_w4,
    mc_weight_w4,
    mc_weight_w8,
    mc_weight_w12,
    mc_weight_w16,
    mc_weight_w20,
};
const x264_weight_t weight_none[3] = { {{0}} };

static void mc_copy( pixel *src, int i_src_stride, pixel *dst, int i_dst_stride, int i_width, int i_height )
{
    for( int y = 0; y < i_height; y++ )
    {
        memcpy( dst, src, i_width * sizeof(pixel) );

        src += i_src_stride;
        dst += i_dst_stride;
    }
}

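/* H.264 6-tap halfpel filter, coefficients (1,-5,20,20,-5,1)/32, applied
 * horizontally (d=1) or vertically (d=stride). hpel_filter() computes all
 * three halfpel planes: h, v, and c (filtered in both directions, normalized
 * by 1024). The intermediate v row is kept unclipped in a 16-bit buffer so
 * the second filter pass retains full precision; at high bit depth a negative
 * bias (pad) keeps the unclipped values within int16_t range. */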
#define TAPFILTER(pix, d) ((pix)[x-2*d] + (pix)[x+3*d] - 5*((pix)[x-d] + (pix)[x+2*d]) + 20*((pix)[x] + (pix)[x+d]))
static void hpel_filter( pixel *dsth, pixel *dstv, pixel *dstc, pixel *src,
                         int stride, int width, int height, int16_t *buf )
{
    const int pad = (BIT_DEPTH > 9) ? (-10 * PIXEL_MAX) : 0;
    for( int y = 0; y < height; y++ )
    {
        for( int x = -2; x < width+3; x++ )
        {
            int v = TAPFILTER(src,stride);
            dstv[x] = x264_clip_pixel( (v + 16) >> 5 );
            /* transform v for storage in a 16-bit integer */
            buf[x+2] = v + pad;
        }
        for( int x = 0; x < width; x++ )
            dstc[x] = x264_clip_pixel( (TAPFILTER(buf+2,1) - 32*pad + 512) >> 10 );
        for( int x = 0; x < width; x++ )
            dsth[x] = x264_clip_pixel( (TAPFILTER(src,1) + 16) >> 5 );
        dsth += stride;
        dstv += stride;
        dstc += stride;
        src  += stride;
    }
}

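/* Tables mapping each of the 16 qpel positions (qpel_idx = (mvy&3)*4 + (mvx&3))
 * to the full/halfpel planes averaged to produce it: 0 = full, 1 = h, 2 = v,
 * 3 = c. Positions whose coordinates are all integer or halfpel need only
 * hpel_ref0; the remaining quarter-pel positions average two planes. */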
static const uint8_t hpel_ref0[16] = {0,1,1,1,0,1,1,1,2,3,3,3,0,1,1,1};
static const uint8_t hpel_ref1[16] = {0,0,0,0,2,2,3,2,2,2,3,2,2,2,3,2};

static void mc_luma( pixel *dst,    int i_dst_stride,
                     pixel *src[4], int i_src_stride,
                     int mvx, int mvy,
                     int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, i_dst_stride, dst, i_dst_stride, weight, i_width, i_height );
    }
    else if( weight->weightfn )
        mc_weight( dst, i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
    else
        mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
}

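/* Example: mv = (1,2) gives qpel_idx = (2<<2)+1 = 9, so src1 = src[hpel_ref0[9]]
 * = the c plane and src2 = src[hpel_ref1[9]] = the v plane; the (1/4,1/2) sample
 * is their rounded average, per the H.264 quarter-pel interpolation rules. */
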
static pixel *get_ref( pixel *dst,   int *i_dst_stride,
                       pixel *src[4], int i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height, const x264_weight_t *weight )
{
    int qpel_idx = ((mvy&3)<<2) + (mvx&3);
    int offset = (mvy>>2)*i_src_stride + (mvx>>2);
    pixel *src1 = src[hpel_ref0[qpel_idx]] + offset + ((mvy&3) == 3) * i_src_stride;

    if( qpel_idx & 5 ) /* qpel interpolation needed */
    {
        pixel *src2 = src[hpel_ref1[qpel_idx]] + offset + ((mvx&3) == 3);
        pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        if( weight->weightfn )
            mc_weight( dst, *i_dst_stride, dst, *i_dst_stride, weight, i_width, i_height );
        return dst;
    }
    else if( weight->weightfn )
    {
        mc_weight( dst, *i_dst_stride, src1, i_src_stride, weight, i_width, i_height );
        return dst;
    }
    else
    {
        /* no interpolation needed: return a pointer into the reference
         * plane itself instead of copying */
        *i_dst_stride = i_src_stride;
        return src1;
    }
}

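/* Chroma is interpolated bilinearly from the four neighbouring full samples.
 * The weights below always satisfy cA+cB+cC+cD = 64, hence the +32 rounding
 * term and >>6; e.g. an eighth-pel offset of (3,5) gives cA=15, cB=9, cC=25,
 * cD=15. */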
/* full chroma mc (i.e. down to 1/8 pixel) */
static void mc_chroma( pixel *dstu, pixel *dstv, int i_dst_stride,
                       pixel *src, int i_src_stride,
                       int mvx, int mvy,
                       int i_width, int i_height )
{
    pixel *srcp;

    int d8x = mvx&0x07;
    int d8y = mvy&0x07;
    int cA = (8-d8x)*(8-d8y);
    int cB = d8x    *(8-d8y);
    int cC = (8-d8x)*d8y;
    int cD = d8x    *d8y;

    /* chroma is stored with U and V interleaved, hence the *2 indexing */
    src += (mvy >> 3) * i_src_stride + (mvx >> 3)*2;
    srcp = &src[i_src_stride];

    for( int y = 0; y < i_height; y++ )
    {
        for( int x = 0; x < i_width; x++ )
        {
            dstu[x] = ( cA*src[2*x]  + cB*src[2*x+2] +
                        cC*srcp[2*x] + cD*srcp[2*x+2] + 32 ) >> 6;
            dstv[x] = ( cA*src[2*x+1]  + cB*src[2*x+3] +
                        cC*srcp[2*x+1] + cD*srcp[2*x+3] + 32 ) >> 6;
        }
        dstu += i_dst_stride;
        dstv += i_dst_stride;
        src   = srcp;
        srcp += i_src_stride;
    }
}

#define MC_COPY(W) \
static void mc_copy_w##W( pixel *dst, int i_dst, pixel *src, int i_src, int i_height ) \
{ \
    mc_copy( src, i_src, dst, i_dst, W, i_height ); \
}
MC_COPY( 16 )
MC_COPY( 8 )
MC_COPY( 4 )

void x264_plane_copy_c( pixel *dst, int i_dst,
                        pixel *src, int i_src, int w, int h )
{
    while( h-- )
    {
        memcpy( dst, src, w * sizeof(pixel) );
        dst += i_dst;
        src += i_src;
    }
}

void x264_plane_copy_interleave_c( pixel *dst, int i_dst,
                                   pixel *srcu, int i_srcu,
                                   pixel *srcv, int i_srcv, int w, int h )
{
    for( int y=0; y<h; y++, dst+=i_dst, srcu+=i_srcu, srcv+=i_srcv )
        for( int x=0; x<w; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

void x264_plane_copy_deinterleave_c( pixel *dstu, int i_dstu,
                                     pixel *dstv, int i_dstv,
                                     pixel *src, int i_src, int w, int h )
{
    for( int y=0; y<h; y++, dstu+=i_dstu, dstv+=i_dstv, src+=i_src )
        for( int x=0; x<w; x++ )
        {
            dstu[x] = src[2*x];
            dstv[x] = src[2*x+1];
        }
}

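/* The 8x8x2 helpers move one macroblock's worth of chroma between the frame's
 * interleaved U/V plane (NV12-style, U and V samples alternating) and the
 * encoder's separate per-plane fenc/fdec buffers. */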
static void store_interleave_8x8x2( pixel *dst, int i_dst, pixel *srcu, pixel *srcv )
{
    for( int y=0; y<8; y++, dst+=i_dst, srcu+=FDEC_STRIDE, srcv+=FDEC_STRIDE )
        for( int x=0; x<8; x++ )
        {
            dst[2*x]   = srcu[x];
            dst[2*x+1] = srcv[x];
        }
}

static void load_deinterleave_8x8x2_fenc( pixel *dst, pixel *src, int i_src )
{
    x264_plane_copy_deinterleave_c( dst, FENC_STRIDE, dst+FENC_STRIDE/2, FENC_STRIDE, src, i_src, 8, 8 );
}

static void load_deinterleave_8x8x2_fdec( pixel *dst, pixel *src, int i_src )
{
    x264_plane_copy_deinterleave_c( dst, FDEC_STRIDE, dst+FDEC_STRIDE/2, FDEC_STRIDE, src, i_src, 8, 8 );
}

/* no-op fallbacks for platforms without prefetch instructions */
static void prefetch_fenc_null( pixel *pix_y, int stride_y,
                                pixel *pix_uv, int stride_uv, int mb_x )
{}

static void prefetch_ref_null( pixel *pix, int stride, int parity )
{}

static void memzero_aligned( void *dst, int n )
{
    memset( dst, 0, n );
}

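/* Integral (summed-area) image helpers for exhaustive motion search (esa/tesa).
 * The *init*h passes accumulate horizontal 4- or 8-pixel sums down each column;
 * the *init*v passes then difference rows 4 or 8 apart to turn those running
 * sums into 4x4 and 8x8 block sums. See their use in x264_frame_filter(). */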
static void integral_init4h( uint16_t *sum, pixel *pix, int stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3];
    for( int x = 0; x < stride-4; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+4] - pix[x];
    }
}

static void integral_init8h( uint16_t *sum, pixel *pix, int stride )
{
    int v = pix[0]+pix[1]+pix[2]+pix[3]+pix[4]+pix[5]+pix[6]+pix[7];
    for( int x = 0; x < stride-8; x++ )
    {
        sum[x] = v + sum[x-stride];
        v += pix[x+8] - pix[x];
    }
}

static void integral_init4v( uint16_t *sum8, uint16_t *sum4, int stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum4[x] = sum8[x+4*stride] - sum8[x];
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] + sum8[x+8*stride+4] - sum8[x] - sum8[x+4];
}

static void integral_init8v( uint16_t *sum8, int stride )
{
    for( int x = 0; x < stride-8; x++ )
        sum8[x] = sum8[x+8*stride] - sum8[x];
}

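/* Build the half-resolution image used by the lookahead: lowres[0] holds the
 * downsampled plane and lowres[1..3] its h/v/c halfpel versions, so lookahead
 * motion estimation can reuse the ordinary qpel MC path at half resolution. */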
void x264_frame_init_lowres( x264_t *h, x264_frame_t *frame )
{
    pixel *src = frame->plane[0];
    int i_stride = frame->i_stride[0];
    int i_height = frame->i_lines[0];
    int i_width = frame->i_width[0];

    // duplicate last row and column so that their interpolation doesn't have to be special-cased
    for( int y = 0; y < i_height; y++ )
        src[i_width+y*i_stride] = src[i_width-1+y*i_stride];
    memcpy( src+i_stride*i_height, src+i_stride*(i_height-1), (i_width+1) * sizeof(pixel) );
    h->mc.frame_init_lowres_core( src, frame->lowres[0], frame->lowres[1], frame->lowres[2], frame->lowres[3],
                                  i_stride, frame->i_stride_lowres, frame->i_width_lowres, frame->i_lines_lowres );
    x264_frame_expand_border_lowres( frame );

    memset( frame->i_cost_est, -1, sizeof(frame->i_cost_est) );

    for( int y = 0; y < h->param.i_bframe + 2; y++ )
        for( int x = 0; x < h->param.i_bframe + 2; x++ )
            frame->i_row_satds[y][x][0] = -1;

    /* mark lowres motion vectors as not yet computed */
    for( int y = 0; y <= !!h->param.i_bframe; y++ )
        for( int x = 0; x <= h->param.i_bframe; x++ )
            frame->lowres_mvs[y][x][0][0] = 0x7FFF;
}

static void frame_init_lowres_core( pixel *src0, pixel *dst0, pixel *dsth, pixel *dstv, pixel *dstc,
                                    int src_stride, int dst_stride, int width, int height )
{
    for( int y = 0; y < height; y++ )
    {
        pixel *src1 = src0+src_stride;
        pixel *src2 = src1+src_stride;
        for( int x = 0; x < width; x++ )
        {
            // slower than naive bilinear, but matches asm
#define FILTER(a,b,c,d) ((((a+b+1)>>1)+((c+d+1)>>1)+1)>>1)
            dst0[x] = FILTER(src0[2*x  ], src1[2*x  ], src0[2*x+1], src1[2*x+1]);
            dsth[x] = FILTER(src0[2*x+1], src1[2*x+1], src0[2*x+2], src1[2*x+2]);
            dstv[x] = FILTER(src1[2*x  ], src2[2*x  ], src1[2*x+1], src2[2*x+1]);
            dstc[x] = FILTER(src1[2*x+1], src2[2*x+1], src1[2*x+2], src2[2*x+2]);
#undef FILTER
        }
        src0 += src_stride*2;
        dst0 += dst_stride;
        dsth += dst_stride;
        dstv += dst_stride;
        dstc += dst_stride;
    }
}

/* Estimate the total amount of influence on future quality that could be had if we
 * were to improve the reference samples used to inter predict any given macroblock. */
static void mbtree_propagate_cost( int *dst, uint16_t *propagate_in, uint16_t *intra_costs,
                                   uint16_t *inter_costs, uint16_t *inv_qscales, float *fps_factor, int len )
{
    float fps = *fps_factor / 256.f;
    for( int i = 0; i < len; i++ )
    {
        float intra_cost = intra_costs[i] * inv_qscales[i];
        float propagate_amount = propagate_in[i] + intra_cost*fps;
        float propagate_num = intra_costs[i] - (inter_costs[i] & LOWRES_COST_MASK);
        float propagate_denom = intra_costs[i];
        dst[i] = (int)(propagate_amount * propagate_num / propagate_denom + 0.5f);
    }
}

void x264_mc_init( int cpu, x264_mc_functions_t *pf )
{
    pf->mc_luma   = mc_luma;
    pf->get_ref   = get_ref;
    pf->mc_chroma = mc_chroma;

    pf->avg[PIXEL_16x16]= pixel_avg_16x16;
    pf->avg[PIXEL_16x8] = pixel_avg_16x8;
    pf->avg[PIXEL_8x16] = pixel_avg_8x16;
    pf->avg[PIXEL_8x8]  = pixel_avg_8x8;
    pf->avg[PIXEL_8x4]  = pixel_avg_8x4;
    pf->avg[PIXEL_4x8]  = pixel_avg_4x8;
    pf->avg[PIXEL_4x4]  = pixel_avg_4x4;
    pf->avg[PIXEL_4x2]  = pixel_avg_4x2;
    pf->avg[PIXEL_2x4]  = pixel_avg_2x4;
    pf->avg[PIXEL_2x2]  = pixel_avg_2x2;

    pf->weight       = x264_mc_weight_wtab;
    pf->offsetadd    = x264_mc_weight_wtab;
    pf->offsetsub    = x264_mc_weight_wtab;
    pf->weight_cache = x264_weight_cache;

    pf->copy_16x16_unaligned = mc_copy_w16;
    pf->copy[PIXEL_16x16] = mc_copy_w16;
    pf->copy[PIXEL_8x8]   = mc_copy_w8;
    pf->copy[PIXEL_4x4]   = mc_copy_w4;

    pf->store_interleave_8x8x2       = store_interleave_8x8x2;
    pf->load_deinterleave_8x8x2_fenc = load_deinterleave_8x8x2_fenc;
    pf->load_deinterleave_8x8x2_fdec = load_deinterleave_8x8x2_fdec;

    pf->plane_copy = x264_plane_copy_c;
    pf->plane_copy_interleave = x264_plane_copy_interleave_c;
    pf->plane_copy_deinterleave = x264_plane_copy_deinterleave_c;

    pf->hpel_filter = hpel_filter;

    pf->prefetch_fenc = prefetch_fenc_null;
    pf->prefetch_ref  = prefetch_ref_null;
    pf->memcpy_aligned = memcpy;
    pf->memzero_aligned = memzero_aligned;
    pf->frame_init_lowres_core = frame_init_lowres_core;

    pf->integral_init4h = integral_init4h;
    pf->integral_init8h = integral_init8h;
    pf->integral_init4v = integral_init4v;
    pf->integral_init8v = integral_init8v;

    pf->mbtree_propagate_cost = mbtree_propagate_cost;
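
    /* let each platform replace the C references with optimized versions */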
#if HAVE_MMX
    x264_mc_init_mmx( cpu, pf );
#endif
#if HAVE_ALTIVEC
    if( cpu&X264_CPU_ALTIVEC )
        x264_mc_altivec_init( pf );
#endif
#if HAVE_ARMV6
    x264_mc_init_arm( cpu, pf );
#endif
}

void x264_frame_filter( x264_t *h, x264_frame_t *frame, int mb_y, int b_end )
{
    const int b_interlaced = h->sh.b_mbaff;
    const int stride = frame->i_stride[0] << b_interlaced;
    const int width = frame->i_width[0];
    int start = (mb_y*16 >> b_interlaced) - 8; // buffer = 4 for deblock + 3 for 6tap, rounded to 8
    int height = ((b_end ? frame->i_lines[0] : mb_y*16) >> b_interlaced) + 8;
    int offs = start*stride - 8; // buffer = 3 for 6tap, aligned to 8 for simd

    if( mb_y & b_interlaced )
        return;

    for( int y = 0; y <= b_interlaced; y++, offs += frame->i_stride[0] )
    {
        h->mc.hpel_filter(
            frame->filtered[1] + offs,
            frame->filtered[2] + offs,
            frame->filtered[3] + offs,
            frame->plane[0] + offs,
            stride, width + 16, height - start,
            h->scratch_buffer );
    }

    /* generate integral image:
     * frame->integral contains 2 planes. in the upper plane, each element is
     * the sum of an 8x8 pixel region with top-left corner on that point.
     * in the lower plane, 4x4 sums (needed only with --partitions p4x4). */

    if( frame->integral )
    {
        if( start < 0 )
        {
            memset( frame->integral - PADV * stride - PADH, 0, stride * sizeof(uint16_t) );
            start = -PADV;
        }
        if( b_end )
            height += PADV-9;
        for( int y = start; y < height; y++ )
        {
            pixel    *pix  = frame->plane[0] + y * stride - PADH;
            uint16_t *sum8 = frame->integral + (y+1) * stride - PADH;
            uint16_t *sum4;
            if( h->frames.b_have_sub8x8_esa )
            {
                h->mc.integral_init4h( sum8, pix, stride );
                sum8 -= 8*stride; /* integral_init4v reads 4 and 8 rows ahead */
                sum4 = sum8 + stride * (frame->i_lines[0] + PADV*2);
                if( y >= 8-PADV )
                    h->mc.integral_init4v( sum8, sum4, stride );
            }
            else
            {
                h->mc.integral_init8h( sum8, pix, stride );
                if( y >= 8-PADV )
                    h->mc.integral_init8v( sum8-8*stride, stride );
            }
        }
    }
}