1 /*****************************************************************************
2 * mc.c: h264 encoder library (Motion Compensation)
3 *****************************************************************************
4 * Copyright (C) 2003 Laurent Aimar
5 * $Id: mc.c,v 1.1 2004/06/03 19:27:07 fenrir Exp $
7 * Authors: Laurent Aimar <fenrir@via.ecp.fr>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA.
22 *****************************************************************************/
/* 6-tap H.264 half-pel filter [1,-5,20,20,-5,1] applied along a stride
 * of i_pix_next (1 = horizontal, frame stride = vertical).
 * Returns the unscaled, unclipped filter sum. */
static inline int x264_tapfilter( uint8_t *pix, int i_pix_next )
{
    return pix[-2*i_pix_next] - 5*pix[-1*i_pix_next] + 20*(pix[0] + pix[1*i_pix_next]) - 5*pix[ 2*i_pix_next] + pix[ 3*i_pix_next];
}
/* Horizontal-only specialization of x264_tapfilter (i_pix_next == 1). */
static inline int x264_tapfilter1( uint8_t *pix )
{
    return pix[-2] - 5*pix[-1] + 20*(pix[0] + pix[1]) - 5*pix[ 2] + pix[ 3];
}
/* Rounded average of two pixel blocks into dst (dst = (a+b+1)>>1),
 * each plane addressed with its own stride. */
static inline void pixel_avg( uint8_t *dst,  int i_dst_stride,
                              uint8_t *src1, int i_src1_stride,
                              uint8_t *src2, int i_src2_stride,
                              int i_width, int i_height )
{
    int x, y;
    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = ( src1[x] + src2[x] + 1 ) >> 1;
        }
        /* advance all three planes by their own stride */
        dst  += i_dst_stride;
        src1 += i_src1_stride;
        src2 += i_src2_stride;
    }
}
/* In-place rounded average: dst = (dst + src + 1) >> 1 over a
 * width x height block. */
static inline void pixel_avg_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height )
{
    int x, y;
    for( y = 0; y < height; y++ )
    {
        for( x = 0; x < width; x++ )
        {
            dst[x] = ( dst[x] + src[x] + 1 ) >> 1;
        }
        dst += i_dst;
        src += i_src;
    }
}
/* Generate the fixed-size averaging wrappers installed in pf->avg[]. */
#define PIXEL_AVG_C( name, width, height ) \
static void name( uint8_t *pix1, int i_stride_pix1, \
                  uint8_t *pix2, int i_stride_pix2 ) \
{ \
    pixel_avg_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height ); \
}
PIXEL_AVG_C( pixel_avg_16x16, 16, 16 )
PIXEL_AVG_C( pixel_avg_16x8,  16, 8 )
PIXEL_AVG_C( pixel_avg_8x16,  8, 16 )
PIXEL_AVG_C( pixel_avg_8x8,   8, 8 )
PIXEL_AVG_C( pixel_avg_8x4,   8, 4 )
PIXEL_AVG_C( pixel_avg_4x8,   4, 8 )
PIXEL_AVG_C( pixel_avg_4x4,   4, 4 )
PIXEL_AVG_C( pixel_avg_4x2,   4, 2 )
PIXEL_AVG_C( pixel_avg_2x4,   2, 4 )
PIXEL_AVG_C( pixel_avg_2x2,   2, 2 )
/* Implicit weighted bipred only:
 * assumes log2_denom = 5, offset = 0, weight1 + weight2 = 64 */
#define op_scale2(x) dst[x] = x264_clip_uint8( (dst[x]*i_weight1 + src[x]*i_weight2 + (1<<5)) >> 6 )
/* In-place weighted average of dst and src; the unrolled op_scale2 calls
 * cover up to 16 columns, with early `continue` for narrower widths. */
static inline void pixel_avg_weight_wxh( uint8_t *dst, int i_dst, uint8_t *src, int i_src, int width, int height, int i_weight1 ){
    int y;
    const int i_weight2 = 64 - i_weight1;
    for(y=0; y<height; y++, dst += i_dst, src += i_src){
        op_scale2(0);
        op_scale2(1);
        if(width==2) continue;
        op_scale2(2);
        op_scale2(3);
        if(width==4) continue;
        op_scale2(4);
        op_scale2(5);
        op_scale2(6);
        op_scale2(7);
        if(width==8) continue;
        op_scale2(8);
        op_scale2(9);
        op_scale2(10);
        op_scale2(11);
        op_scale2(12);
        op_scale2(13);
        op_scale2(14);
        op_scale2(15);
    }
}
/* Generate the fixed-size weighted-average wrappers for pf->avg_weight[]. */
#define PIXEL_AVG_WEIGHT_C( width, height ) \
static void pixel_avg_weight_##width##x##height( \
                uint8_t *pix1, int i_stride_pix1, \
                uint8_t *pix2, int i_stride_pix2, int i_weight1 ) \
{ \
    pixel_avg_weight_wxh( pix1, i_stride_pix1, pix2, i_stride_pix2, width, height, i_weight1 ); \
}
PIXEL_AVG_WEIGHT_C(16,16)
PIXEL_AVG_WEIGHT_C(16,8)
PIXEL_AVG_WEIGHT_C(8,16)
PIXEL_AVG_WEIGHT_C(8,8)
PIXEL_AVG_WEIGHT_C(8,4)
PIXEL_AVG_WEIGHT_C(4,8)
PIXEL_AVG_WEIGHT_C(4,4)
PIXEL_AVG_WEIGHT_C(4,2)
PIXEL_AVG_WEIGHT_C(2,4)
PIXEL_AVG_WEIGHT_C(2,2)
#undef PIXEL_AVG_WEIGHT_C
158 typedef void (*pf_mc_t)(uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height );
/* Plain block copy, row by row, honoring the two strides. */
static void mc_copy( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int y;
    for( y = 0; y < i_height; y++ )
    {
        memcpy( dst, src, i_width );
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Horizontal half-pel interpolation: 6-tap filter per pixel,
 * rounded (+16) and scaled (>>5), then clipped to [0,255]. */
static inline void mc_hh( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int x, y;
    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = x264_mc_clip1( ( x264_tapfilter1( &src[x] ) + 16 ) >> 5 );
        }
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Vertical half-pel interpolation: 6-tap filter along the stride,
 * rounded (+16) and scaled (>>5), then clipped to [0,255]. */
static inline void mc_hv( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    int x, y;
    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = x264_mc_clip1( ( x264_tapfilter( &src[x], i_src_stride ) + 16 ) >> 5 );
        }
        src += i_src_stride;
        dst += i_dst_stride;
    }
}
/* Center half-pel interpolation: 6-tap horizontally then 6-tap
 * vertically on the intermediate values, column by column, reusing a
 * sliding 6-entry window of horizontally filtered rows. */
static inline void mc_hc( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t *out;
    uint8_t *pix;
    int x, y;

    for( x = 0; x < i_width; x++ )
    {
        int tap[6];

        pix = &src[x];
        out = &dst[x];

        /* prime the vertical window with 5 horizontally filtered rows */
        tap[0] = x264_tapfilter1( &pix[-2*i_src_stride] );
        tap[1] = x264_tapfilter1( &pix[-1*i_src_stride] );
        tap[2] = x264_tapfilter1( &pix[ 0*i_src_stride] );
        tap[3] = x264_tapfilter1( &pix[ 1*i_src_stride] );
        tap[4] = x264_tapfilter1( &pix[ 2*i_src_stride] );

        for( y = 0; y < i_height; y++ )
        {
            tap[5] = x264_tapfilter1( &pix[ 3*i_src_stride] );

            /* intermediate values carry a 5-bit gain, so round with
             * +512 and shift by 10 */
            *out = x264_mc_clip1( ( tap[0] - 5*tap[1] + 20 * tap[2] + 20 * tap[3] - 5*tap[4] + tap[5] + 512 ) >> 10 );

            /* next row: slide the tap window down by one */
            pix += i_src_stride;
            out += i_dst_stride;
            tap[0] = tap[1];
            tap[1] = tap[2];
            tap[2] = tap[3];
            tap[3] = tap[4];
            tap[4] = tap[5];
        }
    }
}
/* qpel (1,0): average full-pel with horizontal half-pel. */
static void mc_xy10( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp[16*16];    /* scratch half-pel plane, stride = i_width */

    mc_hh( src, i_src_stride, tmp, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, src, i_src_stride, tmp, i_width, i_width, i_height );
}
/* qpel (3,0): average the next full-pel (src+1) with horizontal half-pel. */
static void mc_xy30( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp[16*16];    /* scratch half-pel plane, stride = i_width */

    mc_hh( src, i_src_stride, tmp, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, src+1, i_src_stride, tmp, i_width, i_width, i_height );
}
/* qpel (0,1): average full-pel with vertical half-pel. */
static void mc_xy01( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp[16*16];    /* scratch half-pel plane, stride = i_width */

    mc_hv( src, i_src_stride, tmp, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, src, i_src_stride, tmp, i_width, i_width, i_height );
}
/* qpel (0,3): average the next full-pel row with vertical half-pel. */
static void mc_xy03( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp[16*16];    /* scratch half-pel plane, stride = i_width */

    mc_hv( src, i_src_stride, tmp, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, src+i_src_stride, i_src_stride, tmp, i_width, i_width, i_height );
}
/* qpel (1,1): average vertical and horizontal half-pel planes. */
static void mc_xy11( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hv( src, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hh( src, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (3,1): average vertical half-pel at src+1 with horizontal half-pel. */
static void mc_xy31( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hv( src+1, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hh( src, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (1,3): average vertical half-pel with horizontal half-pel one row down. */
static void mc_xy13( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hv( src, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hh( src+i_src_stride, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (3,3): average vertical half-pel at src+1 with horizontal
 * half-pel one row down. */
static void mc_xy33( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hv( src+1, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hh( src+i_src_stride, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (2,1): average center half-pel with horizontal half-pel. */
static void mc_xy21( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hc( src, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hh( src, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (1,2): average center half-pel with vertical half-pel. */
static void mc_xy12( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hc( src, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hv( src, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (3,2): average center half-pel with vertical half-pel at src+1. */
static void mc_xy32( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hc( src, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hv( src+1, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
/* qpel (2,3): average center half-pel with horizontal half-pel one row down. */
static void mc_xy23( uint8_t *src, int i_src_stride, uint8_t *dst, int i_dst_stride, int i_width, int i_height )
{
    uint8_t tmp1[16*16];   /* scratch half-pel planes, stride = i_width */
    uint8_t tmp2[16*16];

    mc_hc( src, i_src_stride, tmp1, i_width, i_width, i_height );
    mc_hh( src+i_src_stride, i_src_stride, tmp2, i_width, i_width, i_height );
    pixel_avg( dst, i_dst_stride, tmp1, i_width, tmp2, i_width, i_width, i_height );
}
338 static void motion_compensation_luma( uint8_t *src, int i_src_stride,
339 uint8_t *dst, int i_dst_stride,
341 int i_width, int i_height )
343 static pf_mc_t pf_mc[4][4] = /*XXX [dqy][dqx] */
345 { mc_copy, mc_xy10, mc_hh, mc_xy30 },
346 { mc_xy01, mc_xy11, mc_xy21, mc_xy31 },
347 { mc_hv, mc_xy12, mc_hc, mc_xy32 },
348 { mc_xy03, mc_xy13, mc_xy23, mc_xy33 },
351 src += (mvy >> 2) * i_src_stride + (mvx >> 2);
352 pf_mc[mvy&0x03][mvx&0x03]( src, i_src_stride, dst, i_dst_stride, i_width, i_height );
/* Luma MC using the four precomputed half-pel planes src[0..3]
 * (full, h, v, hv): pick the nearest plane(s) for the qpel vector,
 * averaging two of them when a quarter-pel offset is needed. */
static void mc_luma( uint8_t *src[4], int i_src_stride,
                     uint8_t *dst,    int i_dst_stride,
                     int mvx, int mvy,
                     int i_width, int i_height )
{
    uint8_t *src1, *src2;

    /* diagonal qpel positions between the v and hv planes need a
     * rounding correction so both hpel candidates agree */
    int correction = (mvx&1) && (mvy&1) && ((mvx&2) ^ (mvy&2));

    int hpel1x = mvx>>1;
    int hpel1y = (mvy+1-correction)>>1;
    int filter1 = (hpel1x & 1) + ( (hpel1y & 1) << 1 );

    src1 = src[filter1] + (hpel1y >> 1) * i_src_stride + (hpel1x >> 1);

    if ( (mvx|mvy) & 1 ) /* qpel interpolation needed */
    {
        int hpel2x = (mvx+1)>>1;
        int hpel2y = (mvy+correction)>>1;
        int filter2 = (hpel2x & 1) + ( (hpel2y & 1) <<1 );

        src2 = src[filter2] + (hpel2y >> 1) * i_src_stride + (hpel2x >> 1);

        pixel_avg( dst, i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
    }
    else
    {
        mc_copy( src1, i_src_stride, dst, i_dst_stride, i_width, i_height );
    }
}
/* Like mc_luma, but avoids the copy on half/full-pel positions:
 * returns a pointer directly into the appropriate plane (updating
 * *i_dst_stride), and only writes into dst when two planes must be
 * averaged for a quarter-pel position. */
static uint8_t *get_ref( uint8_t *src[4], int i_src_stride,
                         uint8_t *dst,   int * i_dst_stride,
                         int mvx, int mvy,
                         int i_width, int i_height )
{
    uint8_t *src1, *src2;

    /* same diagonal rounding correction as mc_luma */
    int correction = (mvx&1) && (mvy&1) && ((mvx&2) ^ (mvy&2));

    int hpel1x = mvx>>1;
    int hpel1y = (mvy+1-correction)>>1;
    int filter1 = (hpel1x & 1) + ( (hpel1y & 1) << 1 );

    src1 = src[filter1] + (hpel1y >> 1) * i_src_stride + (hpel1x >> 1);

    if ( (mvx|mvy) & 1 ) /* qpel interpolation needed */
    {
        int hpel2x = (mvx+1)>>1;
        int hpel2y = (mvy+correction)>>1;
        int filter2 = (hpel2x & 1) + ( (hpel2y & 1) <<1 );

        src2 = src[filter2] + (hpel2y >> 1) * i_src_stride + (hpel2x >> 1);

        pixel_avg( dst, *i_dst_stride, src1, i_src_stride,
                   src2, i_src_stride, i_width, i_height );
        return dst;
    }
    else
    {
        *i_dst_stride = i_src_stride;
        return src1;
    }
}
/* full chroma mc (ie until 1/8 pixel)*/
/* Bilinear chroma interpolation: the eighth-pel fraction (d8x, d8y)
 * gives the four corner weights cA..cD (summing to 64), rounded with
 * +32 and scaled by >>6. */
static void motion_compensation_chroma( uint8_t *src, int i_src_stride,
                                        uint8_t *dst, int i_dst_stride,
                                        int mvx, int mvy,
                                        int i_width, int i_height )
{
    uint8_t *srcp;
    int x, y;

    const int d8x = mvx&0x07;
    const int d8y = mvy&0x07;

    const int cA = (8-d8x)*(8-d8y);
    const int cB = d8x    *(8-d8y);
    const int cC = (8-d8x)*d8y;
    const int cD = d8x    *d8y;

    src += (mvy >> 3) * i_src_stride + (mvx >> 3);
    srcp = &src[i_src_stride];

    for( y = 0; y < i_height; y++ )
    {
        for( x = 0; x < i_width; x++ )
        {
            dst[x] = ( cA*src[x] + cB*src[x+1] +
                       cC*srcp[x] + cD*srcp[x+1] + 32 ) >> 6;
        }
        dst  += i_dst_stride;
        src   = srcp;
        srcp += i_src_stride;
    }
}
/* SIMD chroma MC wrapper: the asm kernel handles widths >= 4; fall
 * back to the C version for 2-wide blocks.
 * NOTE(review): reconstructed control flow — the unconditional C call
 * followed by the asm call in the extracted text only makes sense as a
 * width-based if/else; confirm against upstream. */
static void motion_compensation_chroma_sse( uint8_t *src, int i_src_stride,
                                            uint8_t *dst, int i_dst_stride,
                                            int mvx, int mvy,
                                            int i_width, int i_height )
{
    if (i_width == 2) {
        motion_compensation_chroma(src, i_src_stride, dst, i_dst_stride,
                                   mvx, mvy, i_width, i_height);
    } else {
        const int d8x = mvx&0x07;
        const int d8y = mvy&0x07;

        src += (mvy >> 3) * i_src_stride + (mvx >> 3);

        x264_mc_chroma_sse(src, i_src_stride, dst, i_dst_stride,
                           d8x, d8y, i_height, i_width);
    }
}
476 void x264_mc_init( int cpu, x264_mc_functions_t *pf )
478 pf->mc_luma = mc_luma;
479 pf->get_ref = get_ref;
480 pf->mc_chroma = motion_compensation_chroma;
482 pf->avg[PIXEL_16x16]= pixel_avg_16x16;
483 pf->avg[PIXEL_16x8] = pixel_avg_16x8;
484 pf->avg[PIXEL_8x16] = pixel_avg_8x16;
485 pf->avg[PIXEL_8x8] = pixel_avg_8x8;
486 pf->avg[PIXEL_8x4] = pixel_avg_8x4;
487 pf->avg[PIXEL_4x8] = pixel_avg_4x8;
488 pf->avg[PIXEL_4x4] = pixel_avg_4x4;
489 pf->avg[PIXEL_4x2] = pixel_avg_4x2;
490 pf->avg[PIXEL_2x4] = pixel_avg_2x4;
491 pf->avg[PIXEL_2x2] = pixel_avg_2x2;
493 pf->avg_weight[PIXEL_16x16]= pixel_avg_weight_16x16;
494 pf->avg_weight[PIXEL_16x8] = pixel_avg_weight_16x8;
495 pf->avg_weight[PIXEL_8x16] = pixel_avg_weight_8x16;
496 pf->avg_weight[PIXEL_8x8] = pixel_avg_weight_8x8;
497 pf->avg_weight[PIXEL_8x4] = pixel_avg_weight_8x4;
498 pf->avg_weight[PIXEL_4x8] = pixel_avg_weight_4x8;
499 pf->avg_weight[PIXEL_4x4] = pixel_avg_weight_4x4;
500 pf->avg_weight[PIXEL_4x2] = pixel_avg_weight_4x2;
501 pf->avg_weight[PIXEL_2x4] = pixel_avg_weight_2x4;
502 pf->avg_weight[PIXEL_2x2] = pixel_avg_weight_2x2;
505 if( cpu&X264_CPU_MMXEXT ) {
506 x264_mc_mmxext_init( pf );
507 pf->mc_chroma = motion_compensation_chroma_sse;
511 if( cpu&X264_CPU_SSE2 )
512 x264_mc_sse2_init( pf );
515 if( cpu&X264_CPU_ALTIVEC )
516 x264_mc_altivec_init( pf );
521 void get_funcs_mmx(pf_mc_t*, pf_mc_t*, pf_mc_t*);
522 void get_funcs_sse2(pf_mc_t*, pf_mc_t*, pf_mc_t*);
525 extern void x264_horizontal_filter_mmxext( uint8_t *dst, int i_dst_stride,
526 uint8_t *src, int i_src_stride,
527 int i_width, int i_height );
528 extern void x264_center_filter_mmxext( uint8_t *dst1, int i_dst1_stride,
529 uint8_t *dst2, int i_dst2_stride,
530 uint8_t *src, int i_src_stride,
531 int i_width, int i_height );
533 void x264_frame_filter( int cpu, x264_frame_t *frame )
535 const int x_inc = 16, y_inc = 16;
536 const int stride = frame->i_stride[0];
539 pf_mc_t int_h = mc_hh;
540 pf_mc_t int_v = mc_hv;
541 pf_mc_t int_hv = mc_hc;
545 if( cpu&X264_CPU_MMXEXT )
546 get_funcs_mmx(&int_h, &int_v, &int_hv);
550 if( cpu&X264_CPU_SSE2 )
551 get_funcs_sse2(&int_h, &int_v, &int_hv);
556 if ( cpu & X264_CPU_MMXEXT )
558 x264_horizontal_filter_mmxext(frame->filtered[1] - 8 * stride - 8, stride,
559 frame->plane[0] - 8 * stride - 8, stride,
560 stride - 48, frame->i_lines[0] + 16);
561 x264_center_filter_mmxext(frame->filtered[2] - 8 * stride - 8, stride,
562 frame->filtered[3] - 8 * stride - 8, stride,
563 frame->plane[0] - 8 * stride - 8, stride,
564 stride - 48, frame->i_lines[0] + 16);
569 for( y = -8; y < frame->i_lines[0]+8; y += y_inc )
571 uint8_t *p_in = frame->plane[0] + y * stride - 8;
572 uint8_t *p_h = frame->filtered[1] + y * stride - 8;
573 uint8_t *p_v = frame->filtered[2] + y * stride - 8;
574 uint8_t *p_hv = frame->filtered[3] + y * stride - 8;
575 for( x = -8; x < stride - 64 + 8; x += x_inc )
577 int_h( p_in, stride, p_h, stride, x_inc, y_inc );
578 int_v( p_in, stride, p_v, stride, x_inc, y_inc );
579 int_hv( p_in, stride, p_hv, stride, x_inc, y_inc );
590 void x264_frame_init_lowres( int cpu, x264_frame_t *frame )
593 const int i_stride = frame->i_stride[0];
594 const int i_stride2 = frame->i_stride_lowres;
595 const int i_width2 = i_stride2 - 64;
597 for( y = 0; y < frame->i_lines_lowres - 1; y++ )
599 uint8_t *src0 = &frame->plane[0][2*y*i_stride];
600 uint8_t *src1 = src0+i_stride;
601 uint8_t *src2 = src1+i_stride;
602 uint8_t *dst0 = &frame->lowres[0][y*i_stride2];
603 uint8_t *dsth = &frame->lowres[1][y*i_stride2];
604 uint8_t *dstv = &frame->lowres[2][y*i_stride2];
605 uint8_t *dstc = &frame->lowres[3][y*i_stride2];
606 for( x = 0; x < i_width2 - 1; x++ )
608 dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
609 dsth[x] = (src0[2*x+1] + src0[2*x+2] + src1[2*x+1] + src1[2*x+2] + 2) >> 2;
610 dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
611 dstc[x] = (src1[2*x+1] + src1[2*x+2] + src2[2*x+1] + src2[2*x+2] + 2) >> 2;
613 dst0[x] = (src0[2*x ] + src0[2*x+1] + src1[2*x ] + src1[2*x+1] + 2) >> 2;
614 dstv[x] = (src1[2*x ] + src1[2*x+1] + src2[2*x ] + src2[2*x+1] + 2) >> 2;
615 dsth[x] = (src0[2*x+1] + src1[2*x+1] + 1) >> 1;
616 dstc[x] = (src1[2*x+1] + src2[2*x+1] + 1) >> 1;
618 for( i = 0; i < 4; i++ )
619 memcpy( &frame->lowres[i][y*i_stride2], &frame->lowres[i][(y-1)*i_stride2], i_width2 );
621 for( y = 0; y < 16; y++ )
622 for( x = 0; x < 16; x++ )
623 frame->i_cost_est[x][y] = -1;
625 x264_frame_expand_border_lowres( frame );